Commit f7adf5f5 authored by Ulan Degenbaev, committed by Commit Bot

Avoid deduplication overhead for backing stores managed by embedder

If a BackingStore is marked as !free_on_destruct, then we don't have to
guarantee that there is only one such BackingStore pointing to the
underlying buffer. So we can skip the costly registration in the
process-global table of backing stores.

Bug: v8:9380,chromium:1002693
Change-Id: Iad1ec5c4811d6c52a9a9d78dd700acf69170db60
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1815136
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63941}
parent d68bf369
......@@ -7296,12 +7296,10 @@ std::shared_ptr<i::BackingStore> LookupOrCreateBackingStore(
backing_store = i::BackingStore::WrapAllocation(
i_isolate, data, byte_length, shared, free_on_destruct);
if (free_on_destruct) {
// The embedder requested free-on-destruct. They already have a
// direct pointer to the buffer start, so globally register the backing
// store in case they come back with the same buffer start.
i::GlobalBackingStoreRegistry::Register(backing_store);
}
// The embedder already has a direct pointer to the buffer start, so
// globally register the backing store in case they come back with the
// same buffer start and the backing store is marked as free_on_destruct.
i::GlobalBackingStoreRegistry::Register(backing_store);
}
return backing_store;
}
......
......@@ -123,6 +123,7 @@ BackingStore::~BackingStore() {
if (buffer_start_ == nullptr) return; // nothing to deallocate
if (is_wasm_memory_) {
DCHECK(free_on_destruct_);
TRACE_BS("BSw:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
buffer_start_, byte_length(), byte_capacity_);
if (is_shared_) {
......@@ -500,6 +501,14 @@ void GlobalBackingStoreRegistry::Register(
std::shared_ptr<BackingStore> backing_store) {
if (!backing_store || !backing_store->buffer_start()) return;
if (!backing_store->free_on_destruct()) {
    // If the backing store buffer is managed by the embedder,
    // then we don't have to guarantee that there is a single unique
    // BackingStore per buffer_start(), because the destructor of
    // the BackingStore will be a no-op in that case.
    return;
}
base::MutexGuard scope_lock(&impl()->mutex_);
if (backing_store->globally_registered_) return;
TRACE_BS("BS:reg bs=%p mem=%p (length=%zu, capacity=%zu)\n",
......
......@@ -359,6 +359,35 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
// Wrapping the same embedder-owned buffer in two ArrayBuffers (after
// detaching the first) must yield backing stores that point at the
// same underlying memory.
THREADED_TEST(ArrayBuffer_ExternalReused) {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  v8::HandleScope handle_scope(isolate);
  i::ScopedVector<uint8_t> buffer(100);
  // First wrap: take a reference to the backing store, then detach.
  Local<v8::ArrayBuffer> first = v8::ArrayBuffer::New(isolate, buffer.begin(), 100);
  std::shared_ptr<v8::BackingStore> first_store = first->GetBackingStore();
  first->Detach();
  // Second wrap of the very same buffer start.
  Local<v8::ArrayBuffer> second =
      v8::ArrayBuffer::New(isolate, buffer.begin(), 100);
  std::shared_ptr<v8::BackingStore> second_store = second->GetBackingStore();
  // Both stores must expose the identical data pointer.
  CHECK_EQ(first_store->Data(), second_store->Data());
}
// Same reuse scenario as ArrayBuffer_ExternalReused, but for
// SharedArrayBuffer: two wrappers over one embedder-owned buffer must
// report the same data pointer (no detach needed for shared buffers).
THREADED_TEST(SharedArrayBuffer_ExternalReused) {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  v8::HandleScope handle_scope(isolate);
  i::ScopedVector<uint8_t> buffer(100);
  // Create two SharedArrayBuffers over the identical memory region.
  Local<v8::SharedArrayBuffer> first =
      v8::SharedArrayBuffer::New(isolate, buffer.begin(), 100);
  std::shared_ptr<v8::BackingStore> first_store = first->GetBackingStore();
  Local<v8::SharedArrayBuffer> second =
      v8::SharedArrayBuffer::New(isolate, buffer.begin(), 100);
  std::shared_ptr<v8::BackingStore> second_store = second->GetBackingStore();
  // Both backing stores must wrap the same buffer start.
  CHECK_EQ(first_store->Data(), second_store->Data());
}
THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
i::FLAG_harmony_sharedarraybuffer = true;
LocalContext env;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment