Commit e3072158 authored by Dominik Inführ, committed by Commit Bot

[heap] Make ExpandBackground more resilient against OOM

It could happen that a background thread expanded the heap by one page, but by
the time that thread tried to allocate from the new page, the space had already
been used up by other background threads. If this happened three times in a
row, V8 crashed with an OOM error. This CL prevents such situations by always
allocating the object immediately at area_start() of the freshly added page,
while the space mutex is still held.

Bug: v8:10315
Change-Id: I6390c84e742bf4105e70e930c21557ff1f4d952d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2743881
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73305}
parent a192820e
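
As illustration of the race being fixed, here is a minimal, self-contained sketch (a toy model with made-up names such as ToyPagedSpace; it is not V8 code and only mirrors the pattern in the diff below). The key point is that the requested bytes are reserved at the page's area_start() while the space mutex is held, and only the remainder of the page is published to the shared free list, so no other background thread can claim the reserved area.

    // Toy model only (hypothetical names, not V8 internals): shows the
    // "reserve first, free only the remainder" pattern from the diff below.
    #include <cstddef>
    #include <cstdint>
    #include <mutex>
    #include <optional>
    #include <utility>
    #include <vector>

    using Address = std::uintptr_t;

    struct Page {
      Address area_start;
      std::size_t area_size;
    };

    class ToyPagedSpace {
     public:
      // After the fix: carve the requested bytes out of the fresh page before
      // any other thread can see it, then publish only the tail.
      std::optional<std::pair<Address, std::size_t>> ExpandBackground(
          std::size_t size_in_bytes) {
        Page page = AllocatePage();  // fresh page, not yet visible to others
        if (size_in_bytes > page.area_size) return std::nullopt;
        std::lock_guard<std::mutex> lock(space_mutex_);
        Address object_start = page.area_start;  // reserved for this thread
        // Only the remainder becomes reachable through the shared free list.
        free_list_.emplace_back(object_start + size_in_bytes,
                                page.area_size - size_in_bytes);
        return std::make_pair(object_start, size_in_bytes);
      }

     private:
      Page AllocatePage() {
        Page page{next_page_start_, kPageAreaSize};
        next_page_start_ += kPageAreaSize;
        return page;
      }

      static constexpr std::size_t kPageAreaSize = 256 * 1024;
      Address next_page_start_ = 0x10000;
      std::mutex space_mutex_;
      std::vector<std::pair<Address, std::size_t>> free_list_;  // (start, size)
    };

Before the CL, the analogous step pushed the whole page onto the free list and the caller retried a free-list allocation outside this critical section; that retry window is where other background threads could consume the space, which is the failure mode the commit message describes.
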
@@ -320,14 +320,17 @@ Page* PagedSpace::Expand() {
   return page;
 }
 
-Page* PagedSpace::ExpandBackground(LocalHeap* local_heap) {
+base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
+    LocalHeap* local_heap, size_t size_in_bytes) {
   Page* page = AllocatePage();
-  if (page == nullptr) return nullptr;
+  if (page == nullptr) return {};
   base::MutexGuard lock(&space_mutex_);
   AddPage(page);
-  Free(page->area_start(), page->area_size(),
+  Address object_start = page->area_start();
+  CHECK_LE(size_in_bytes, page->area_size());
+  Free(page->area_start() + size_in_bytes, page->area_size() - size_in_bytes,
        SpaceAccountingMode::kSpaceAccounted);
-  return page;
+  return std::make_pair(object_start, size_in_bytes);
 }
 
 int PagedSpace::CountTotalPages() {
@@ -589,13 +592,12 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
   }
 
   if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
-      heap()->CanExpandOldGenerationBackground(AreaSize()) &&
-      ExpandBackground(local_heap)) {
-    DCHECK((CountTotalPages() > 1) ||
-           (min_size_in_bytes <= free_list_->Available()));
-    auto result = TryAllocationFromFreeListBackground(
-        local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
-    if (result) return result;
+      heap()->CanExpandOldGenerationBackground(AreaSize())) {
+    auto result = ExpandBackground(local_heap, max_size_in_bytes);
+    if (result) {
+      DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
+      return result;
+    }
   }
 
   if (collector->sweeping_in_progress()) {
......
@@ -354,7 +354,13 @@ class V8_EXPORT_PRIVATE PagedSpace
   // it cannot allocate requested number of pages from OS, or if the hard heap
   // size limit has been hit.
   virtual Page* Expand();
-  Page* ExpandBackground(LocalHeap* local_heap);
+
+  // Expands the space by a single page from a background thread and allocates
+  // a memory area of the given size in it. If successful the method returns
+  // the address and size of the area.
+  base::Optional<std::pair<Address, size_t>> ExpandBackground(
+      LocalHeap* local_heap, size_t size_in_bytes);
+
   Page* AllocatePage();
 
   // Sets up a linear allocation area that fits the given number of bytes.
......
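
A short usage sketch for the new signature, continuing the hypothetical ToyPagedSpace model above (it mirrors the shape of the RawRefillLabBackground hunk but is not the actual V8 call site): the caller now receives the reserved (address, size) pair directly instead of re-querying the free list.

    // Toy caller for the ToyPagedSpace sketch above (assumes that definition).
    #include <cstdio>

    int main() {
      ToyPagedSpace space;
      const std::size_t max_size_in_bytes = 4096;
      if (auto result = space.ExpandBackground(max_size_in_bytes)) {
        // (object_start, size) is already reserved; no second free-list lookup
        // means no window for another thread to take the space.
        std::printf("reserved %zu bytes at %#zx\n", result->second,
                    static_cast<std::size_t>(result->first));
      }
      return 0;
    }
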