Commit 95e39b06 authored by Bill Budge, committed by Commit Bot

[API] Don't use VM methods of v8::ArrayBuffer::Allocator.

- Replaces calls to Allocator Reserve, Free, and SetPermissions
  with equivalent page allocator calls (allocation.h).
- Un-implements these methods to catch usage, in preparation for
  removing these.

Bug: chromium:799573
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: Id233b7a9cfc8e332c64e514f6359e8b670c2d75e
Reviewed-on: https://chromium-review.googlesource.com/911883
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Eric Holk <eholk@chromium.org>
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51340}
parent fd9251db
...@@ -461,16 +461,7 @@ void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); } ...@@ -461,16 +461,7 @@ void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); }
void v8::ArrayBuffer::Allocator::Free(void* data, size_t length, void v8::ArrayBuffer::Allocator::Free(void* data, size_t length,
AllocationMode mode) { AllocationMode mode) {
switch (mode) { UNIMPLEMENTED();
case AllocationMode::kNormal: {
Free(data, length);
return;
}
case AllocationMode::kReservation: {
UNIMPLEMENTED();
return;
}
}
} }
void v8::ArrayBuffer::Allocator::SetProtection( void v8::ArrayBuffer::Allocator::SetProtection(
...@@ -483,7 +474,7 @@ namespace { ...@@ -483,7 +474,7 @@ namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public: public:
virtual void* Allocate(size_t length) { void* Allocate(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT #if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Work around for GCC bug on AIX // Work around for GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839 // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
...@@ -494,7 +485,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { ...@@ -494,7 +485,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data; return data;
} }
virtual void* AllocateUninitialized(size_t length) { void* AllocateUninitialized(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT #if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Work around for GCC bug on AIX // Work around for GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839 // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
...@@ -505,42 +496,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { ...@@ -505,42 +496,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data; return data;
} }
virtual void Free(void* data, size_t) { free(data); } void Free(void* data, size_t) override { free(data); }
virtual void* Reserve(size_t length) {
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
void* address = i::AllocatePages(i::GetRandomMmapAddr(), allocated,
page_size, PageAllocator::kNoAccess);
return address;
}
virtual void Free(void* data, size_t length,
v8::ArrayBuffer::Allocator::AllocationMode mode) {
switch (mode) {
case v8::ArrayBuffer::Allocator::AllocationMode::kNormal: {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(data, allocated));
return;
}
}
}
virtual void SetProtection(
void* data, size_t length,
v8::ArrayBuffer::Allocator::Protection protection) {
DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
PageAllocator::Permission permission =
(protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
? PageAllocator::kReadWrite
: PageAllocator::kNoAccess;
CHECK(i::SetPermissions(data, length, permission));
}
}; };
bool RunExtraCode(Isolate* isolate, Local<Context> context, bool RunExtraCode(Isolate* isolate, Local<Context> context,
......
...@@ -84,15 +84,18 @@ class ArrayBufferAllocatorBase : public v8::ArrayBuffer::Allocator { ...@@ -84,15 +84,18 @@ class ArrayBufferAllocatorBase : public v8::ArrayBuffer::Allocator {
allocator_->Free(data, length); allocator_->Free(data, length);
} }
void* Reserve(size_t length) override { return allocator_->Reserve(length); } void* Reserve(size_t length) override {
UNIMPLEMENTED();
return nullptr;
}
void Free(void* data, size_t length, AllocationMode mode) override { void Free(void* data, size_t length, AllocationMode mode) override {
allocator_->Free(data, length, mode); UNIMPLEMENTED();
} }
void SetProtection(void* data, size_t length, void SetProtection(void* data, size_t length,
Protection protection) override { Protection protection) override {
allocator_->SetProtection(data, length, protection); UNIMPLEMENTED();
} }
private: private:
...@@ -121,18 +124,6 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase { ...@@ -121,18 +124,6 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
} }
} }
void* Reserve(size_t length) override {
// |length| must be over the threshold so we can distinguish VM from
// malloced memory.
DCHECK_LE(kVMThreshold, length);
return ArrayBufferAllocatorBase::Reserve(length);
}
void Free(void* data, size_t length, AllocationMode) override {
// Ignore allocation mode; the appropriate action is determined by |length|.
Free(data, length);
}
private: private:
static constexpr size_t kVMThreshold = 65536; static constexpr size_t kVMThreshold = 65536;
static constexpr size_t kTwoGB = 2u * 1024u * 1024u * 1024u; static constexpr size_t kTwoGB = 2u * 1024u * 1024u * 1024u;
...@@ -172,14 +163,6 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase { ...@@ -172,14 +163,6 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
return ArrayBufferAllocatorBase::Free(data, Adjust(length)); return ArrayBufferAllocatorBase::Free(data, Adjust(length));
} }
void* Reserve(size_t length) override {
return ArrayBufferAllocatorBase::Reserve(Adjust(length));
}
void Free(void* data, size_t length, AllocationMode mode) override {
return ArrayBufferAllocatorBase::Free(data, Adjust(length), mode);
}
private: private:
size_t Adjust(size_t length) { size_t Adjust(size_t length) {
const size_t kAllocationLimit = 10 * kMB; const size_t kAllocationLimit = 10 * kMB;
...@@ -2574,7 +2557,11 @@ void SourceGroup::JoinThread() { ...@@ -2574,7 +2557,11 @@ void SourceGroup::JoinThread() {
ExternalizedContents::~ExternalizedContents() { ExternalizedContents::~ExternalizedContents() {
if (base_ != nullptr) { if (base_ != nullptr) {
Shell::array_buffer_allocator->Free(base_, length_, mode_); if (mode_ == ArrayBuffer::Allocator::AllocationMode::kReservation) {
CHECK(i::FreePages(base_, length_));
} else {
Shell::array_buffer_allocator->Free(base_, length_);
}
} }
} }
......
...@@ -19211,10 +19211,11 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) { ...@@ -19211,10 +19211,11 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
// actually a buffer we are tracking. // actually a buffer we are tracking.
isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace( isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
allocation.length); allocation.length);
CHECK(FreePages(allocation.allocation_base, allocation.length));
} else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length);
} }
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length, allocation.mode);
} }
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate, void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
......
...@@ -54,8 +54,9 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size, ...@@ -54,8 +54,9 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
// We always allocate the largest possible offset into the heap, so the // We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible. // addressable memory after the guard page can be made inaccessible.
*allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize()); size_t page_size = AllocatePageSize();
DCHECK_EQ(0, size % CommitPageSize()); *allocation_length = RoundUp(kWasmMaxHeapOffset, page_size);
DCHECK_EQ(0, size % page_size);
WasmAllocationTracker* const allocation_tracker = WasmAllocationTracker* const allocation_tracker =
isolate->wasm_engine()->allocation_tracker(); isolate->wasm_engine()->allocation_tracker();
...@@ -67,9 +68,9 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size, ...@@ -67,9 +68,9 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
return nullptr; return nullptr;
} }
// The Reserve makes the whole region inaccessible by default. // Make the whole region inaccessible by default.
*allocation_base = *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
isolate->array_buffer_allocator()->Reserve(*allocation_length); PageAllocator::kNoAccess);
if (*allocation_base == nullptr) { if (*allocation_base == nullptr) {
allocation_tracker->ReleaseAddressSpace(*allocation_length); allocation_tracker->ReleaseAddressSpace(*allocation_length);
return nullptr; return nullptr;
...@@ -78,8 +79,7 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size, ...@@ -78,8 +79,7 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
void* memory = *allocation_base; void* memory = *allocation_base;
// Make the part we care about accessible. // Make the part we care about accessible.
isolate->array_buffer_allocator()->SetProtection( CHECK(SetPermissions(memory, size, PageAllocator::kReadWrite));
memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
reinterpret_cast<v8::Isolate*>(isolate) reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(size); ->AdjustAmountOfExternalAllocatedMemory(size);
......
...@@ -404,9 +404,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate, ...@@ -404,9 +404,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if ((enable_guard_regions || old_size == new_size) && old_size != 0) { if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
DCHECK_NOT_NULL(old_buffer->backing_store()); DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) { if (old_size != new_size) {
isolate->array_buffer_allocator()->SetProtection( CHECK(i::SetPermissions(old_mem_start, new_size,
old_mem_start, new_size, PageAllocator::kReadWrite));
v8::ArrayBuffer::Allocator::Protection::kReadWrite);
reinterpret_cast<v8::Isolate*>(isolate) reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize); ->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
} }
......
...@@ -1017,8 +1017,13 @@ struct ManuallyExternalizedBuffer { ...@@ -1017,8 +1017,13 @@ struct ManuallyExternalizedBuffer {
} }
~ManuallyExternalizedBuffer() { ~ManuallyExternalizedBuffer() {
if (!buffer_->has_guard_region()) { if (!buffer_->has_guard_region()) {
isolate_->array_buffer_allocator()->Free( if (buffer_->allocation_mode() ==
allocation_base_, allocation_length_, buffer_->allocation_mode()); ArrayBuffer::Allocator::AllocationMode::kReservation) {
CHECK(v8::internal::FreePages(allocation_base_, allocation_length_));
} else {
isolate_->array_buffer_allocator()->Free(allocation_base_,
allocation_length_);
}
} }
} }
}; };
......
...@@ -1874,11 +1874,7 @@ class OOMArrayBufferAllocator : public ArrayBuffer::Allocator { ...@@ -1874,11 +1874,7 @@ class OOMArrayBufferAllocator : public ArrayBuffer::Allocator {
public: public:
void* Allocate(size_t) override { return nullptr; } void* Allocate(size_t) override { return nullptr; }
void* AllocateUninitialized(size_t) override { return nullptr; } void* AllocateUninitialized(size_t) override { return nullptr; }
void* Reserve(size_t length) override { return nullptr; }
void Free(void* data, size_t length, AllocationMode mode) override {}
void Free(void*, size_t) override {} void Free(void*, size_t) override {}
void SetProtection(void* data, size_t length,
Protection protection) override {}
}; };
TEST_F(ValueSerializerTest, DecodeArrayBufferOOM) { TEST_F(ValueSerializerTest, DecodeArrayBufferOOM) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment