Commit f7292352 authored by mlippautz, committed by Commit bot

[heap] Simplify distribution of remaining memory during sweeping & compaction

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1653973003

Cr-Commit-Position: refs/heads/master@{#33668}
parent 6da81619
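
Note for context: the mechanism deleted below is a push-based scheme in which PagedSpace::DivideUponCompactionSpaces round-robins free-list memory over the per-task compaction spaces before parallel evacuation starts, until every space reaches its limit or the source runs dry. The following is a minimal standalone sketch of that loop, not V8 code; Source and Consumer are hypothetical stand-ins for V8's FreeList and CompactionSpace.

#include <algorithm>
#include <cstdint>
#include <vector>

struct Source {
  intptr_t available = 0;
  // Carve off up to |wanted| bytes; returns 0 once exhausted.
  intptr_t TryRemove(intptr_t wanted) {
    intptr_t taken = std::min(wanted, available);
    available -= taken;
    return taken;
  }
};

struct Consumer {
  intptr_t available = 0;
};

// Round-robin over all consumers, topping each up towards |limit| until the
// source runs dry or no consumer needs memory anymore.
void Divide(Source* source, std::vector<Consumer>* consumers, intptr_t limit) {
  bool memory_available = true;
  bool consumers_need_memory = true;
  while (memory_available && consumers_need_memory) {
    consumers_need_memory = false;
    for (Consumer& c : *consumers) {
      if (c.available < limit) {
        consumers_need_memory = true;
        intptr_t got = source->TryRemove(limit - c.available);
        if (got > 0) {
          c.available += got;
        } else {
          memory_available = false;  // source exhausted; stop distributing
          break;
        }
      }
    }
  }
}
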
@@ -3282,18 +3282,10 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// Set up compaction spaces.
Evacuator** evacuators = new Evacuator*[num_tasks];
CompactionSpaceCollection** compaction_spaces_for_tasks =
new CompactionSpaceCollection*[num_tasks];
for (int i = 0; i < num_tasks; i++) {
evacuators[i] = new Evacuator(this, evacuation_candidates_,
newspace_evacuation_candidates_);
compaction_spaces_for_tasks[i] = evacuators[i]->compaction_spaces();
}
heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
num_tasks);
heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
num_tasks);
delete[] compaction_spaces_for_tasks;
// Kick off parallel tasks.
StartParallelCompaction(evacuators, num_tasks);
......
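
Assuming this hunk shows only deletions plus unchanged context (the viewer stripped the +/- markers), the setup that remains after the change is roughly the following: evacuators are still created per task, but no memory is divided up front.

// Set up compaction spaces.
Evacuator** evacuators = new Evacuator*[num_tasks];
for (int i = 0; i < num_tasks; i++) {
  evacuators[i] = new Evacuator(this, evacuation_candidates_,
                                newspace_evacuation_candidates_);
}
// Kick off parallel tasks; each task's spaces now acquire memory on demand.
StartParallelCompaction(evacuators, num_tasks);
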
@@ -984,52 +984,6 @@ void PagedSpace::AddMemory(Address start, intptr_t size) {
}
FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
if (free_space != nullptr) {
accounting_stats_.DecreaseCapacity(free_space->size());
}
return free_space;
}
void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
int num, intptr_t limit) {
DCHECK_GT(num, 0);
DCHECK(other != nullptr);
if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
EmptyAllocationInfo();
bool memory_available = true;
bool spaces_need_memory = true;
FreeSpace* node = nullptr;
CompactionSpace* current_space = nullptr;
// Iterate over spaces and memory as long as we have memory and there are
// spaces in need of some.
while (memory_available && spaces_need_memory) {
spaces_need_memory = false;
// Round-robin over all spaces.
for (int i = 0; i < num; i++) {
current_space = other[i]->Get(identity());
if (current_space->free_list()->Available() < limit) {
// Space has not reached its limit. Try to get some memory.
spaces_need_memory = true;
node = TryRemoveMemory(limit - current_space->free_list()->Available());
if (node != nullptr) {
CHECK(current_space->identity() == identity());
current_space->AddMemory(node->address(), node->size());
} else {
memory_available = false;
break;
}
}
}
}
}
void PagedSpace::RefillFreeList() {
MarkCompactCollector* collector = heap()->mark_compact_collector();
FreeList* free_list = nullptr;
@@ -1072,7 +1026,6 @@ void CompactionSpace::RefillFreeList() {
}
}
void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to
......
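
The CompactionSpace::RefillFreeList override that survives this change points at the pull-based alternative: a local space asks a shared source for memory when allocation falls short, instead of receiving a pre-divided share. A minimal sketch of that pattern follows; SharedFreeMemory and LocalSpace are hypothetical names, not V8's actual API.

#include <cstdint>
#include <mutex>

struct SharedFreeMemory {
  intptr_t available = 0;
  std::mutex mutex;
  // Hand out up to |wanted| bytes; guarded because several compaction tasks
  // may refill concurrently. Returns 0 once exhausted.
  intptr_t TryTake(intptr_t wanted) {
    std::lock_guard<std::mutex> guard(mutex);
    intptr_t taken = wanted < available ? wanted : available;
    available -= taken;
    return taken;
  }
};

struct LocalSpace {
  SharedFreeMemory* source = nullptr;
  intptr_t free_bytes = 0;
  bool Allocate(intptr_t size) {
    if (free_bytes < size) {
      // Refill on demand rather than relying on an up-front division.
      free_bytes += source->TryTake(size - free_bytes);
    }
    if (free_bytes < size) return false;  // caller must expand the heap
    free_bytes -= size;
    return true;
  }
};
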
@@ -2119,9 +2119,6 @@ class PagedSpace : public Space {
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
intptr_t limit = kCompactionMemoryWanted);
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
@@ -2129,8 +2126,6 @@ class PagedSpace : public Space {
protected:
void AddMemory(Address start, intptr_t size);
FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
void MoveOverFreeMemory(PagedSpace* other);
// PagedSpaces that should be included in snapshots have different, i.e.,
@@ -2893,12 +2888,6 @@ class CompactionSpace : public PagedSpace {
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
: PagedSpace(heap, id, executable) {}
// Adds external memory starting at {start} of {size_in_bytes} to the space.
void AddExternalMemory(Address start, int size_in_bytes) {
IncreaseCapacity(size_in_bytes);
Free(start, size_in_bytes);
}
bool is_local() override { return true; }
void RefillFreeList() override;
......
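
The removed AddExternalMemory helper encodes an accounting invariant: memory has to be counted in the space's capacity before it is handed to the free list, otherwise the free list would report more available bytes than the space officially owns. A toy illustration with hypothetical Stats and SimpleSpace types (a plain counter stands in for the real free list):

#include <cstdint>

struct Stats {
  intptr_t capacity = 0;  // total bytes the space owns
  intptr_t size = 0;      // bytes currently allocated
};

struct SimpleSpace {
  Stats stats;
  intptr_t free_bytes = 0;  // stand-in for a real free list

  void AddExternalMemory(intptr_t size_in_bytes) {
    stats.capacity += size_in_bytes;  // 1. account for the new memory first
    free_bytes += size_in_bytes;      // 2. then make it allocatable
  }
};
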
@@ -59,25 +59,6 @@ class HeapTester {
/* test-api.cc */
static void ResetWeakHandle(bool global_gc);
/* test-spaces.cc */
static CompactionSpaceCollection** InitializeCompactionSpaces(Heap* heap,
int num_spaces);
static void DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
int num_spaces);
static void MergeCompactionSpaces(PagedSpace* space,
CompactionSpaceCollection** spaces,
int num_spaces);
static void AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
AllocationSpace id, int num_spaces,
int num_objects, int object_size);
static void CompactionStats(CompactionSpaceCollection** spaces,
AllocationSpace id, int num_spaces,
intptr_t* capacity, intptr_t* size);
static void TestCompactionSpaceDivide(int num_additional_objects,
int object_size,
int num_compaction_spaces,
int additional_capacity_in_bytes);
};
} // namespace internal
......
@@ -448,236 +448,6 @@ TEST(CompactionSpace) {
}
TEST(CompactionSpaceUsingExternalMemory) {
const int kObjectSize = 512;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
MemoryAllocator* allocator = new MemoryAllocator(isolate);
CHECK(allocator != nullptr);
CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_scope(isolate, allocator);
CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
CHECK(compaction_space != nullptr);
CHECK(compaction_space->SetUp());
OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
CHECK(old_space != nullptr);
CHECK(old_space->SetUp());
// The linear allocation area already counts as used bytes, making
// exact testing impossible.
heap->DisableInlineAllocation();
// Test:
// * Allocate a backing store in old_space.
// * Compute the number num_rest_objects of kObjectSize objects that fit into
//   the rest of available memory.
// * Add the rest of available memory to the compaction space.
// * Allocate num_rest_objects objects in the compaction space.
// * Allocate one object more.
// * Merge the compaction space and compare the expected number of pages.
// Allocate a single object in old_space to initialize a backing page.
old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
// Compute the number of objects that fit into the rest in old_space.
intptr_t rest = static_cast<intptr_t>(old_space->Available());
CHECK_GT(rest, 0);
intptr_t num_rest_objects = rest / kObjectSize;
// After allocating num_rest_objects in compaction_space we allocate a bit
// more.
const intptr_t kAdditionalCompactionMemory = kObjectSize;
// We expect a single old_space page.
const intptr_t kExpectedInitialOldSpacePages = 1;
// We expect a single additional page in compaction space because we mostly
// use external memory.
const intptr_t kExpectedCompactionPages = 1;
// We expect two pages to be reachable from old_space in the end.
const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
CHECK_EQ(compaction_space->CountTotalPages(), 0);
CHECK_EQ(compaction_space->Capacity(), 0);
// Make the rest of memory available for compaction.
old_space->DivideUponCompactionSpaces(&collection, 1, rest);
CHECK_EQ(compaction_space->CountTotalPages(), 0);
CHECK_EQ(compaction_space->Capacity(), rest);
while (num_rest_objects-- > 0) {
compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
}
// We only used external memory so far.
CHECK_EQ(compaction_space->CountTotalPages(), 0);
// Additional allocation.
compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
.ToObjectChecked();
// Now the compaction space should've also acquired a page.
CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);
old_space->MergeCompactionSpace(compaction_space);
CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
delete collection;
delete old_space;
allocator->TearDown();
delete allocator;
}
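
To make the page math of this test concrete, here is the arithmetic with made-up numbers; the real value of rest depends on page size and object headers.

// Hypothetical leftover after the single old_space allocation:
intptr_t rest = 500 * 512 + 100;         // 256,100 bytes (illustrative only)
intptr_t num_rest_objects = rest / 512;  // 500 objects of kObjectSize fit
// Those 500 allocations consume only the external memory handed over by
// DivideUponCompactionSpaces, so CountTotalPages() stays 0. The 501st
// allocation forces the compaction space to acquire its first and only page,
// and merging then adds exactly that one page to old_space.
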
CompactionSpaceCollection** HeapTester::InitializeCompactionSpaces(
Heap* heap, int num_spaces) {
CompactionSpaceCollection** spaces =
new CompactionSpaceCollection*[num_spaces];
for (int i = 0; i < num_spaces; i++) {
spaces[i] = new CompactionSpaceCollection(heap);
}
return spaces;
}
void HeapTester::DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
int num_spaces) {
for (int i = 0; i < num_spaces; i++) {
delete spaces[i];
}
delete[] spaces;
}
void HeapTester::MergeCompactionSpaces(PagedSpace* space,
CompactionSpaceCollection** spaces,
int num_spaces) {
AllocationSpace id = space->identity();
for (int i = 0; i < num_spaces; i++) {
space->MergeCompactionSpace(spaces[i]->Get(id));
CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(), 0);
CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Capacity(), 0);
CHECK_EQ(spaces[i]->Get(id)->Waste(), 0);
}
}
void HeapTester::AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
AllocationSpace id, int num_spaces,
int num_objects, int object_size) {
for (int i = 0; i < num_spaces; i++) {
for (int j = 0; j < num_objects; j++) {
spaces[i]->Get(id)->AllocateRawUnaligned(object_size).ToObjectChecked();
}
spaces[i]->Get(id)->EmptyAllocationInfo();
CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(),
num_objects * object_size);
CHECK_GE(spaces[i]->Get(id)->accounting_stats_.Capacity(),
spaces[i]->Get(id)->accounting_stats_.Size());
}
}
void HeapTester::CompactionStats(CompactionSpaceCollection** spaces,
AllocationSpace id, int num_spaces,
intptr_t* capacity, intptr_t* size) {
*capacity = 0;
*size = 0;
for (int i = 0; i < num_spaces; i++) {
*capacity += spaces[i]->Get(id)->accounting_stats_.Capacity();
*size += spaces[i]->Get(id)->accounting_stats_.Size();
}
}
void HeapTester::TestCompactionSpaceDivide(int num_additional_objects,
int object_size,
int num_compaction_spaces,
int additional_capacity_in_bytes) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
CHECK(old_space != nullptr);
CHECK(old_space->SetUp());
old_space->AllocateRawUnaligned(object_size).ToObjectChecked();
old_space->EmptyAllocationInfo();
intptr_t rest_capacity = old_space->accounting_stats_.Capacity() -
old_space->accounting_stats_.Size();
intptr_t capacity_for_compaction_space =
rest_capacity / num_compaction_spaces;
int num_objects_in_compaction_space =
static_cast<int>(capacity_for_compaction_space) / object_size +
num_additional_objects;
CHECK_GT(num_objects_in_compaction_space, 0);
intptr_t initial_old_space_capacity = old_space->accounting_stats_.Capacity();
CompactionSpaceCollection** spaces =
InitializeCompactionSpaces(heap, num_compaction_spaces);
old_space->DivideUponCompactionSpaces(spaces, num_compaction_spaces,
capacity_for_compaction_space);
intptr_t compaction_capacity = 0;
intptr_t compaction_size = 0;
CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
&compaction_capacity, &compaction_size);
intptr_t old_space_capacity = old_space->accounting_stats_.Capacity();
intptr_t old_space_size = old_space->accounting_stats_.Size();
// Compaction space memory is subtracted from the original space's capacity.
CHECK_EQ(old_space_capacity,
initial_old_space_capacity - compaction_capacity);
CHECK_EQ(compaction_size, 0);
AllocateInCompactionSpaces(spaces, OLD_SPACE, num_compaction_spaces,
num_objects_in_compaction_space, object_size);
// Old space size and capacity should be the same as after dividing.
CHECK_EQ(old_space->accounting_stats_.Size(), old_space_size);
CHECK_EQ(old_space->accounting_stats_.Capacity(), old_space_capacity);
CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
&compaction_capacity, &compaction_size);
MergeCompactionSpaces(old_space, spaces, num_compaction_spaces);
CHECK_EQ(old_space->accounting_stats_.Capacity(),
old_space_capacity + compaction_capacity);
CHECK_EQ(old_space->accounting_stats_.Size(),
old_space_size + compaction_size);
// We check against the expected end capacity.
CHECK_EQ(old_space->accounting_stats_.Capacity(),
initial_old_space_capacity + additional_capacity_in_bytes);
DestroyCompactionSpaces(spaces, num_compaction_spaces);
delete old_space;
}
HEAP_TEST(CompactionSpaceDivideSinglePage) {
const int kObjectSize = KB;
const int kCompactionSpaces = 4;
// Since the bound for objects is tight and the dividing is best effort, we
// subtract some objects to make sure we still fit in the initial page.
// A CHECK makes sure that the overall number of allocated objects stays
// > 0.
const int kAdditionalObjects = -10;
const int kAdditionalCapacityRequired = 0;
TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
kAdditionalCapacityRequired);
}
HEAP_TEST(CompactionSpaceDivideMultiplePages) {
const int kObjectSize = KB;
const int kCompactionSpaces = 4;
// Allocate half a page of objects to ensure that we need one more page per
// compaction space.
const int kAdditionalObjects = (Page::kPageSize / kObjectSize / 2);
const int kAdditionalCapacityRequired =
Page::kAllocatableMemory * kCompactionSpaces;
TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
kAdditionalCapacityRequired);
}
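
The expected end capacity in CompactionSpaceDivideMultiplePages follows from simple arithmetic. A sketch with assumed constants; V8's actual Page::kPageSize and Page::kAllocatableMemory are build-dependent.

const int kPageSize = 1 << 20;    // assumption: 1 MB pages
const int kObjectSize = 1 << 10;  // 1 KB, as in the test
const int kCompactionSpaces = 4;
// Half a page worth of extra objects per compaction space:
const int kAdditionalObjects = kPageSize / kObjectSize / 2;  // 512 objects
// Each space's divided share overflows by about half a page, so each of the
// four spaces must acquire exactly one fresh page. After merging, old_space's
// capacity therefore grows by Page::kAllocatableMemory * kCompactionSpaces.
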
TEST(LargeObjectSpace) {
v8::V8::Initialize();
......