Commit 29bb707e authored by Bill Budge, committed by Commit Bot

[Memory] Add OnCriticalMemoryPressure overload to v8::Platform.

- Adds overload to v8::Platform that will make it easier for embedders to
  maintain a reserve of address space for large, contiguous allocations.
- Rewrites retry logic using loops.
- Moves retry logic from some VirtualMemory allocation functions to AllocPages.

Bug: chromium:756050
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I52e66f9f8b15b6ce2a2f36e74783f178b8cd5cf7
Reviewed-on: https://chromium-review.googlesource.com/840724
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50303}
parent c8736f68
...@@ -158,7 +158,7 @@ class TracingController { ...@@ -158,7 +158,7 @@ class TracingController {
}; };
/** /**
* A V8 page memory allocator. * A V8 memory page allocator.
* *
* Can be implemented by an embedder to manage large host OS allocations. * Can be implemented by an embedder to manage large host OS allocations.
*/ */
...@@ -246,7 +246,7 @@ class Platform { ...@@ -246,7 +246,7 @@ class Platform {
virtual ~Platform() = default; virtual ~Platform() = default;
/** /**
* Allows the embedder to manage large memory allocations. * Allows the embedder to manage memory page allocations.
*/ */
virtual PageAllocator* GetPageAllocator() { virtual PageAllocator* GetPageAllocator() {
// TODO(bbudge) Make this abstract after all embedders implement this. // TODO(bbudge) Make this abstract after all embedders implement this.
...@@ -261,12 +261,21 @@ class Platform { ...@@ -261,12 +261,21 @@ class Platform {
* Embedder overrides of this function must NOT call back into V8. * Embedder overrides of this function must NOT call back into V8.
*/ */
virtual void OnCriticalMemoryPressure() {
  // TODO(bbudge) Remove this when embedders override the following method.
  // See crbug.com/634547.
}
/**
 * Enables the embedder to respond in cases where V8 can't allocate a large
 * contiguous memory region. The |length| parameter is the amount of memory
 * needed (in bytes). Returns true if memory is now available. Returns false
 * if no memory could be made available. V8 will retry allocations until this
 * method returns false.
 *
 * The default implementation declines (returns false), in which case V8
 * falls back to the parameterless OnCriticalMemoryPressure() notification.
 *
 * Embedder overrides of this function must NOT call back into V8.
 */
virtual bool OnCriticalMemoryPressure(size_t length) { return false; }
/** /**
* Gets the number of threads that are used to execute background tasks. Is * Gets the number of threads that are used to execute background tasks. Is
* used to estimate the number of tasks a work package should be split into. * used to estimate the number of tasks a work package should be split into.
......
...@@ -60,17 +60,22 @@ static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type ...@@ -60,17 +60,22 @@ static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); } v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }
// Attempt allocation at most this many times. After each failed attempt, we
// call OnCriticalMemoryPressure to give the embedder a chance to free some
// memory before retrying.
const int kAllocationTries = 2;
} // namespace } // namespace
void* Malloced::New(size_t size) { void* Malloced::New(size_t size) {
void* result = malloc(size); void* result = nullptr;
if (result == nullptr) { for (int i = 0; i < kAllocationTries; ++i) {
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
result = malloc(size); result = malloc(size);
if (result != nullptr) break;
if (!OnCriticalMemoryPressure(size)) break;
}
if (result == nullptr) { if (result == nullptr) {
V8::FatalProcessOutOfMemory("Malloced operator new"); V8::FatalProcessOutOfMemory("Malloced operator new");
} }
}
return result; return result;
} }
...@@ -102,15 +107,16 @@ char* StrNDup(const char* str, int n) { ...@@ -102,15 +107,16 @@ char* StrNDup(const char* str, int n) {
void* AlignedAlloc(size_t size, size_t alignment) { void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_LE(V8_ALIGNOF(void*), alignment); DCHECK_LE(V8_ALIGNOF(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo(alignment)); DCHECK(base::bits::IsPowerOfTwo(alignment));
void* ptr = AlignedAllocInternal(size, alignment); void* result = nullptr;
if (ptr == nullptr) { for (int i = 0; i < kAllocationTries; ++i) {
V8::GetCurrentPlatform()->OnCriticalMemoryPressure(); result = AlignedAllocInternal(size, alignment);
ptr = AlignedAllocInternal(size, alignment); if (result != nullptr) break;
if (ptr == nullptr) { if (!OnCriticalMemoryPressure(size + alignment)) break;
V8::FatalProcessOutOfMemory("AlignedAlloc");
} }
if (result == nullptr) {
V8::FatalProcessOutOfMemory("AlignedAlloc");
} }
return ptr; return result;
} }
...@@ -137,8 +143,14 @@ void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); } ...@@ -137,8 +143,14 @@ void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
void* AllocatePages(void* address, size_t size, size_t alignment, void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) { PageAllocator::Permission access) {
void* result = void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result =
GetPageAllocator()->AllocatePages(address, size, alignment, access); GetPageAllocator()->AllocatePages(address, size, alignment, access);
if (result != nullptr) break;
size_t request_size = size + alignment - AllocatePageSize();
if (!OnCriticalMemoryPressure(request_size)) break;
}
#if defined(LEAK_SANITIZER) #if defined(LEAK_SANITIZER)
if (result != nullptr) { if (result != nullptr) {
__lsan_register_root_region(result, size); __lsan_register_root_region(result, size);
...@@ -182,6 +194,15 @@ byte* AllocatePage(void* address, size_t* allocated) { ...@@ -182,6 +194,15 @@ byte* AllocatePage(void* address, size_t* allocated) {
return static_cast<byte*>(result); return static_cast<byte*>(result);
} }
// Notifies the platform that |length| bytes could not be allocated. Prefers
// the informative overload; if the embedder declines it (returns false),
// falls back to the legacy parameterless notification.
// TODO(bbudge) Rework retry logic once embedders implement the more
// informative overload.
bool OnCriticalMemoryPressure(size_t length) {
  const bool handled =
      V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length);
  if (!handled) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {} VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment) VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
...@@ -250,30 +271,22 @@ void VirtualMemory::TakeControl(VirtualMemory* from) { ...@@ -250,30 +271,22 @@ void VirtualMemory::TakeControl(VirtualMemory* from) {
} }
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) { bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
VirtualMemory first_try(size, hint); VirtualMemory vm(size, hint);
if (first_try.IsReserved()) { if (vm.IsReserved()) {
result->TakeControl(&first_try); result->TakeControl(&vm);
return true; return true;
} }
return false;
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
VirtualMemory second_try(size, hint);
result->TakeControl(&second_try);
return result->IsReserved();
} }
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint, bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) { VirtualMemory* result) {
VirtualMemory first_try(size, hint, alignment); VirtualMemory vm(size, hint, alignment);
if (first_try.IsReserved()) { if (vm.IsReserved()) {
result->TakeControl(&first_try); result->TakeControl(&vm);
return true; return true;
} }
return false;
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
VirtualMemory second_try(size, hint, alignment);
result->TakeControl(&second_try);
return result->IsReserved();
} }
} // namespace internal } // namespace internal
......
...@@ -127,6 +127,11 @@ V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size, ...@@ -127,6 +127,11 @@ V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
V8_EXPORT_PRIVATE V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated); V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
// Function that may release reserved memory regions to allow failed allocations
// to succeed. |length| is the amount of memory needed. Returns |true| if memory
// could be released, |false| otherwise.
V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
// Represents and controls an area of reserved memory. // Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory { class V8_EXPORT_PRIVATE VirtualMemory {
public: public:
......
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
#include <malloc.h> // NOLINT #include <malloc.h> // NOLINT
#endif #endif
#include "src/allocation.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
...@@ -82,10 +84,12 @@ Segment* AccountingAllocator::GetSegment(size_t bytes) { ...@@ -82,10 +84,12 @@ Segment* AccountingAllocator::GetSegment(size_t bytes) {
} }
Segment* AccountingAllocator::AllocateSegment(size_t bytes) { Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = malloc(bytes); const int kAllocationTries = 2;
if (memory == nullptr) { void* memory = nullptr;
V8::GetCurrentPlatform()->OnCriticalMemoryPressure(); for (int i = 0; i < kAllocationTries; ++i) {
memory = malloc(bytes); memory = malloc(bytes);
if (memory != nullptr) break;
if (!OnCriticalMemoryPressure(bytes)) break;
} }
if (memory != nullptr) { if (memory != nullptr) {
base::AtomicWord current = base::AtomicWord current =
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment