Commit 7e283d74 authored by mlippautz, committed by Commit bot

Reland of "[heap] Add more tasks for parallel compaction"

- We now compute the number of parallel compaction tasks based on the length
  of the evacuation candidate list, the number of cores, and a hard upper
  limit.
- Free memory is moved over to the compaction tasks (up to some limit).
- Memory is moved over by dividing up the free list of a given space among the
  compaction spaces' free lists. Since this is potentially slow, we limit the
  maximum amount of moved memory.
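
For illustration, a minimal standalone sketch of the capping logic (see
NumberOfParallelCompactionTasks in the diff below); std::min/std::max stand in
for V8's Min/Max helpers, and the inputs are plain parameters rather than heap
and platform state:

    #include <algorithm>

    int NumberOfParallelCompactionTasks(int num_candidates, int num_cores) {
      const int kPagesPerCompactionTask = 4;  // roughly one task per 4 pages
      const int kMaxCompactionTasks = 8;      // hard upper limit
      return std::min(kMaxCompactionTasks,
                      std::min(1 + num_candidates / kPagesPerCompactionTask,
                               std::max(1, num_cores - 1)));
    }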

This reverts commit bfccd518.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1365743003

Cr-Commit-Position: refs/heads/master@{#30945}
parent 6256dc53
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -6,6 +6,7 @@
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
+#include "src/base/sys-info.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/cpu-profiler.h"
@@ -572,7 +573,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
   heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
   heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
   heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !evacuation()) {
     VerifyEvacuation(heap_);
@@ -593,7 +593,6 @@ bool MarkCompactCollector::IsSweepingCompleted() {
 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
   FreeList* free_list;
   if (space == heap()->old_space()) {
     free_list = free_list_old_space_.get();
   } else if (space == heap()->code_space()) {
@@ -3370,52 +3369,57 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
 }

+int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+  if (!FLAG_parallel_compaction) return 1;
+  // We cap the number of parallel compaction tasks by
+  // - (#cores - 1)
+  // - a value depending on the list of evacuation candidates
+  // - a hard limit
+  const int kPagesPerCompactionTask = 4;
+  const int kMaxCompactionTasks = 8;
+  return Min(kMaxCompactionTasks,
+             Min(1 + evacuation_candidates_.length() / kPagesPerCompactionTask,
+                 Max(1, base::SysInfo::NumberOfProcessors() - 1)));
+}

 void MarkCompactCollector::EvacuatePagesInParallel() {
   if (evacuation_candidates_.length() == 0) return;

-  int num_tasks = 1;
-  if (FLAG_parallel_compaction) {
-    num_tasks = NumberOfParallelCompactionTasks();
-  }
+  const int num_tasks = NumberOfParallelCompactionTasks();

   // Set up compaction spaces.
-  CompactionSpaceCollection** compaction_spaces_for_tasks =
+  CompactionSpaceCollection** spaces_for_tasks =
       new CompactionSpaceCollection*[num_tasks];
   for (int i = 0; i < num_tasks; i++) {
-    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+    spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
   }

-  compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
-      heap()->old_space());
-  compaction_spaces_for_tasks[0]
-      ->Get(CODE_SPACE)
-      ->MoveOverFreeMemory(heap()->code_space());
+  heap()->old_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);
+  heap()->code_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);

   compaction_in_progress_ = true;
   // Kick off parallel tasks.
   for (int i = 1; i < num_tasks; i++) {
     concurrent_compaction_tasks_active_++;
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+        new CompactionTask(heap(), spaces_for_tasks[i]),
         v8::Platform::kShortRunningTask);
   }

-  // Contribute in main thread. Counter and signal are in principal not needed.
-  concurrent_compaction_tasks_active_++;
-  EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
-  pending_compaction_tasks_semaphore_.Signal();
+  // Perform compaction on the main thread.
+  EvacuatePages(spaces_for_tasks[0], &migration_slots_buffer_);

   WaitUntilCompactionCompleted();

   // Merge back memory (compacted and unused) from compaction spaces.
   for (int i = 0; i < num_tasks; i++) {
     heap()->old_space()->MergeCompactionSpace(
-        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+        spaces_for_tasks[i]->Get(OLD_SPACE));
     heap()->code_space()->MergeCompactionSpace(
-        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+        spaces_for_tasks[i]->Get(CODE_SPACE));
-    delete compaction_spaces_for_tasks[i];
+    delete spaces_for_tasks[i];
   }
-  delete[] compaction_spaces_for_tasks;
+  delete[] spaces_for_tasks;

   // Finalize sequentially.
   const int num_pages = evacuation_candidates_.length();
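Worked example with illustrative numbers: given 16 evacuation candidates on a
quad-core machine, the cap evaluates to Min(8, Min(1 + 16 / 4, Max(1, 4 - 1)))
= Min(8, Min(5, 3)) = 3 tasks. The main thread then compacts into
spaces_for_tasks[0] while two background tasks take the remaining collections,
each space having been seeded with up to 1 MB of free memory by DivideMemory.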
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -709,11 +709,8 @@ class MarkCompactCollector {
   void EvacuatePagesInParallel();

-  int NumberOfParallelCompactionTasks() {
-    // TODO(hpayer, mlippautz): Figure out some logic to determine the number
-    // of compaction tasks.
-    return 1;
-  }
+  // The number of parallel compaction tasks, including the main thread.
+  int NumberOfParallelCompactionTasks();

   void WaitUntilCompactionCompleted();
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1014,7 +1014,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Reset();
+  other->accounting_stats_.Clear();

   // Move over pages.
   PageIterator it(other);
@@ -2213,6 +2213,44 @@ intptr_t FreeList::Concatenate(FreeList* free_list) {
 }

+FreeSpace* PagedSpace::TryRemoveMemory() {
+  FreeSpace* space = nullptr;
+  int node_size = 0;
+  space = free_list()->FindNodeIn(FreeList::kHuge, &node_size);
+  if (space == nullptr)
+    space = free_list()->FindNodeIn(FreeList::kLarge, &node_size);
+  if (space == nullptr)
+    space = free_list()->FindNodeIn(FreeList::kMedium, &node_size);
+  if (space == nullptr)
+    space = free_list()->FindNodeIn(FreeList::kSmall, &node_size);
+  if (space != nullptr) {
+    accounting_stats_.AllocateBytes(node_size);
+  }
+  return space;
+}
+
+void PagedSpace::DivideMemory(CompactionSpaceCollection** other, int num,
+                              intptr_t limit) {
+  CHECK(num > 0);
+  CHECK(other != nullptr);
+
+  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+  EmptyAllocationInfo();
+  int index = 0;
+  FreeSpace* node = nullptr;
+  for (CompactionSpace* space = other[index]->Get(identity());
+       ((node = TryRemoveMemory()) != nullptr) &&
+       (space->free_list()->available() < limit);
+       space = other[++index % num]->Get(identity())) {
+    CHECK(space->identity() == identity());
+    space->AddMemory(node->address(), node->size());
+  }
+}
+
 void FreeList::Reset() {
   small_list_.Reset();
   medium_list_.Reset();
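The division is a round-robin hand-off: TryRemoveMemory pops one node at a
time, preferring the huge list over the large, medium, and small ones, and
DivideMemory feeds the nodes to the task spaces in turn. A simplified
standalone model of that loop (not V8 code; plain vectors stand in for the
free lists and the accounting):

    #include <cstdint>
    #include <vector>

    struct Node { int64_t size; };  // models one FreeSpace block

    void DivideMemory(std::vector<Node>* source,
                      std::vector<std::vector<Node>>* tasks, int64_t limit) {
      std::vector<int64_t> moved(tasks->size(), 0);
      size_t index = 0;
      // Hand the next node to the next task space, round-robin, until the
      // current destination has received `limit` bytes or the source is empty.
      while (!source->empty() && moved[index] < limit) {
        Node node = source->back();       // stands in for TryRemoveMemory()
        source->pop_back();
        (*tasks)[index].push_back(node);  // stands in for AddMemory()
        moved[index] += node.size;
        index = (index + 1) % tasks->size();
      }
    }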
@@ -2256,39 +2294,62 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }

+void FreeList::UpdateFragmentationStats(FreeListCategoryType category,
+                                        Address address, int size) {
+  Page* page = Page::FromAddress(address);
+  switch (category) {
+    case kSmall:
+      page->add_available_in_small_free_list(size);
+      break;
+    case kMedium:
+      page->add_available_in_medium_free_list(size);
+      break;
+    case kLarge:
+      page->add_available_in_large_free_list(size);
+      break;
+    case kHuge:
+      page->add_available_in_huge_free_list(size);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
+  FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+  if (node != nullptr) {
+    UpdateFragmentationStats(category, node->address(), -(*node_size));
+    DCHECK(IsVeryLong() || available() == SumFreeLists());
+  }
+  return node;
+}
+
 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   FreeSpace* node = NULL;
   Page* page = NULL;

   if (size_in_bytes <= kSmallAllocationMax) {
-    node = small_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_small_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
+    node = FindNodeIn(kSmall, node_size);
+    if (node != nullptr) {
+      DCHECK(size_in_bytes <= node->size());
       return node;
     }
   }

   if (size_in_bytes <= kMediumAllocationMax) {
-    node = medium_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_medium_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
+    node = FindNodeIn(kMedium, node_size);
+    if (node != nullptr) {
+      DCHECK(size_in_bytes <= node->size());
       return node;
     }
   }

   if (size_in_bytes <= kLargeAllocationMax) {
-    node = large_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_large_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
+    node = FindNodeIn(kLarge, node_size);
+    if (node != nullptr) {
+      DCHECK(size_in_bytes <= node->size());
       return node;
     }
   }
@@ -2544,7 +2605,6 @@ intptr_t PagedSpace::SizeOfObjects() {
          (unswept_free_bytes_ == 0));
   const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
   DCHECK_GE(size, 0);
-  USE(size);
   return size;
 }
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -19,6 +19,7 @@
 namespace v8 {
 namespace internal {

+class CompactionSpaceCollection;
 class Isolate;

 // -----------------------------------------------------------------------------
@@ -1420,19 +1421,11 @@ class AllocationInfo {
 // An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste. The
-// capacity is the sum of size, waste, and available.
 //
 // The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
 class AllocationStats BASE_EMBEDDED {
  public:
   AllocationStats() { Clear(); }
@@ -1443,6 +1436,7 @@ class AllocationStats BASE_EMBEDDED {
     max_capacity_ = 0;
     size_ = 0;
     waste_ = 0;
+    borrowed_ = 0;
   }

   void ClearSizeWaste() {
@@ -1462,6 +1456,7 @@ class AllocationStats BASE_EMBEDDED {
   intptr_t MaxCapacity() { return max_capacity_; }
   intptr_t Size() { return size_; }
   intptr_t Waste() { return waste_; }
+  intptr_t Borrowed() { return borrowed_; }

   // Grow the space by adding available bytes. They are initially marked as
   // being in use (part of the size), but will normally be immediately freed,
@@ -1479,15 +1474,19 @@ class AllocationStats BASE_EMBEDDED {
   // during sweeping, bytes have been marked as being in use (part of the size)
   // and are hereby freed.
   void ShrinkSpace(int size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
     capacity_ -= size_in_bytes;
     size_ -= size_in_bytes;
-    DCHECK(size_ >= 0);
+    DCHECK_GE(size_, 0);
+    DCHECK_GE(capacity_, 0);
   }

   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
     size_ += size_in_bytes;
-    DCHECK(size_ >= 0);
+    DCHECK_GE(size_, 0);
+    DCHECK_LE(size_, capacity_);
   }

   // Free allocated bytes, making them available (size -> available).
@@ -1504,26 +1503,60 @@ class AllocationStats BASE_EMBEDDED {
   // Merge {other} into {this}.
   void Merge(const AllocationStats& other) {
+    DCHECK_GE(other.capacity_, 0);
+    DCHECK_GE(other.size_, 0);
+    DCHECK_GE(other.waste_, 0);
     capacity_ += other.capacity_;
     size_ += other.size_;
+    // See the description of |borrowed_| below for why we need to remove it
+    // from |capacity_| as well as |size_|.
+    capacity_ -= other.borrowed_;
+    size_ -= other.borrowed_;
     waste_ += other.waste_;
-    if (other.max_capacity_ > max_capacity_) {
-      max_capacity_ = other.max_capacity_;
+    if (capacity_ > max_capacity_) {
+      max_capacity_ = capacity_;
     }
   }

   void DecreaseCapacity(intptr_t size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
     capacity_ -= size_in_bytes;
-    DCHECK_GE(capacity_, 0);
+    DCHECK_GE(capacity_, size_);
   }

-  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+  void IncreaseCapacity(intptr_t size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
+    capacity_ += size_in_bytes;
+  }
+
+  void BorrowMemory(intptr_t size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
+    borrowed_ += size_in_bytes;
+  }

  private:
+  // |capacity_| is the number of object-area bytes (i.e., not including page
+  // bookkeeping structures) currently in the space.
   intptr_t capacity_;
+
+  // |max_capacity_| is the maximum |capacity_| ever observed by a space.
   intptr_t max_capacity_;
+
+  // |size_| is the number of allocated bytes.
   intptr_t size_;
+
+  // |waste_| is the number of bytes that are not allocated and not available
+  // to allocation without reorganizing the space via a GC (e.g. small blocks
+  // due to internal fragmentation, top of page areas in map space).
   intptr_t waste_;
+
+  // |borrowed_| denotes the number of bytes that are currently borrowed in
+  // this space, i.e., they have been accounted as allocated in another space,
+  // but have been moved over (e.g. through a free list) to the current space.
+  // Note that accounting them as allocated results in them being included in
+  // |size_| as well as |capacity_| of the original space. The temporary
+  // double-accounting is fixed upon merging accounting stats.
+  intptr_t borrowed_;
 };
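A quick arithmetic check of the borrowed-bytes correction in Merge, with
illustrative values (not V8 code): the lender keeps the moved bytes in its own
size_ and capacity_, the borrower counts them again, and the merge subtracts
borrowed_ from both sums to undo the double-accounting:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t MB = 1024 * 1024;
      // Old space before the merge: 10 MB capacity, 8 MB allocated; the lent
      // megabyte is still counted in both (TryRemoveMemory marked it
      // allocated via AllocateBytes and never shrank capacity).
      int64_t capacity = 10 * MB, size = 8 * MB;
      // Compaction space: borrowed 1 MB via AddMemory/BorrowMemory; assume
      // the task ended up allocating all of it.
      int64_t other_capacity = 1 * MB, other_size = 1 * MB;
      int64_t other_borrowed = 1 * MB;

      // Merge, as in AllocationStats::Merge above.
      capacity += other_capacity - other_borrowed;
      size += other_size - other_borrowed;

      assert(capacity == 10 * MB);  // the lent megabyte is not counted twice
      assert(size == 8 * MB);
      return 0;
    }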
@@ -1682,6 +1715,8 @@ class FreeList {
   PagedSpace* owner() { return owner_; }

  private:
+  enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
+
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
@@ -1695,6 +1730,27 @@ class FreeList {
   static const int kLargeAllocationMax = kMediumListMax;

   FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+  FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
+
+  FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
+    switch (category) {
+      case kSmall:
+        return &small_list_;
+      case kMedium:
+        return &medium_list_;
+      case kLarge:
+        return &large_list_;
+      case kHuge:
+        return &huge_list_;
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  void UpdateFragmentationStats(FreeListCategoryType category, Address address,
+                                int size);

   PagedSpace* owner_;
   Heap* heap_;
@@ -1703,6 +1759,8 @@ class FreeList {
   FreeListCategory large_list_;
   FreeListCategory huge_list_;

+  friend class PagedSpace;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
@@ -1985,7 +2043,25 @@ class PagedSpace : public Space {
   virtual bool is_local() { return false; }

+  // Divide {this} free lists up among {other} CompactionSpaceCollections
+  // up to some certain {limit} of bytes. Note that this operation eventually
+  // needs to iterate over nodes one-by-one, making it a potentially slow
+  // operation.
+  void DivideMemory(CompactionSpaceCollection** other, int num, intptr_t limit);
+
  protected:
+  // Adds memory starting at {start} of {size_in_bytes} to the space.
+  void AddMemory(Address start, int size_in_bytes) {
+    IncreaseCapacity(size_in_bytes);
+    accounting_stats_.BorrowMemory(size_in_bytes);
+    Free(start, size_in_bytes);
+  }
+
+  // Tries to remove some memory from {this} free lists. We try to remove
+  // as much memory as possible, i.e., we check the free lists from huge
+  // to small.
+  FreeSpace* TryRemoveMemory();
+
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
   virtual bool snapshotable() { return true; }
@@ -2741,12 +2817,6 @@ class CompactionSpace : public PagedSpace {
   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
       : PagedSpace(heap, id, executable) {}

-  // Adds external memory starting at {start} of {size_in_bytes} to the space.
-  void AddExternalMemory(Address start, int size_in_bytes) {
-    IncreaseCapacity(size_in_bytes);
-    Free(start, size_in_bytes);
-  }
-
   virtual bool is_local() { return true; }

  protected:
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -458,8 +458,8 @@ TEST(CompactionSpaceUsingExternalMemory) {
   CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
   TestMemoryAllocatorScope test_scope(isolate, allocator);

-  CompactionSpace* compaction_space =
-      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+  CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
+  CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
   CHECK(compaction_space != NULL);
   CHECK(compaction_space->SetUp());
@@ -498,17 +498,11 @@ TEST(CompactionSpaceUsingExternalMemory) {
   // We expect two pages to be reachable from old_space in the end.
   const intptr_t kExpectedOldSpacePagesAfterMerge = 2;

-  Object* chunk =
-      old_space->AllocateRawUnaligned(static_cast<int>(rest)).ToObjectChecked();
   CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
-  CHECK(chunk != nullptr);
-  CHECK(chunk->IsHeapObject());
-
   CHECK_EQ(compaction_space->CountTotalPages(), 0);
   CHECK_EQ(compaction_space->Capacity(), 0);
   // Make the rest of memory available for compaction.
-  compaction_space->AddExternalMemory(HeapObject::cast(chunk)->address(),
-                                      static_cast<int>(rest));
+  old_space->DivideMemory(&collection, 1, rest);
   CHECK_EQ(compaction_space->CountTotalPages(), 0);
   CHECK_EQ(compaction_space->Capacity(), rest);
   while (num_rest_objects-- > 0) {
@@ -525,7 +519,7 @@ TEST(CompactionSpaceUsingExternalMemory) {
   old_space->MergeCompactionSpace(compaction_space);
   CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);

-  delete compaction_space;
+  delete collection;
   delete old_space;

   allocator->TearDown();