Commit 147330f3 authored by mlippautz, committed by Commit bot

[heap] Add compaction space.

The CompactionSpace is temporarily used during compaction to hold migrated
objects. The payload is merged back into the corresponding space after
compaction.

Note that this is not the complete implementation; it is currently only used in a test.
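
Roughly, the intended usage looks as follows (a minimal sketch distilled from the
test added by this CL; the surrounding evacuation logic is not part of this change,
and size_in_bytes stands for the size of a migrated object):

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space->SetUp());
  // During compaction, migrated objects are allocated in the compaction space
  // instead of the target space.
  compaction_space->AllocateRawUnaligned(size_in_bytes).ToObjectChecked();
  // Afterwards, pages, free list, and accounting statistics are merged back
  // into the corresponding space.
  old_space->MergeCompactionSpace(compaction_space);
  delete compaction_space;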

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1314493007

Cr-Commit-Position: refs/heads/master@{#30407}
parent bc4a198d
@@ -5780,19 +5780,16 @@ void Heap::TearDown() {
new_space_.TearDown();
if (old_space_ != NULL) {
old_space_->TearDown();
delete old_space_;
old_space_ = NULL;
}
if (code_space_ != NULL) {
code_space_->TearDown();
delete code_space_;
code_space_ = NULL;
}
if (map_space_ != NULL) {
map_space_->TearDown();
delete map_space_;
map_space_ = NULL;
}
......
@@ -729,9 +729,7 @@ void Page::ResetFreeListStatistics() {
Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
if (chunk == NULL) return NULL;
return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}
@@ -1003,6 +1001,43 @@ void PagedSpace::TearDown() {
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Unmerged fields:
// area_size_
// allocation_info_
// emergency_memory_
// end_of_unswept_pages_
// unswept_free_bytes_
// anchor_
// It only makes sense to merge compatible spaces.
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to (a) not
// waste the memory and (b) keep the rest of the chunk in an iterable state
// (filler is needed).
int linear_size = static_cast<int>(other->limit() - other->top());
other->Free(other->top(), linear_size);
// Move over the free list.
free_list_.Concatenate(other->free_list());
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
other->accounting_stats_.Clear();
// Move over pages.
PageIterator it(other);
Page* p = nullptr;
while (it.has_next()) {
p = it.next();
p->Unlink();
p->set_owner(this);
p->InsertAfter(anchor_.prev_page());
}
}
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1062,8 +1097,7 @@ bool PagedSpace::Expand() {
if (!CanExpand()) return false;
intptr_t size = AreaSize();
if (anchor_.next_page() == &anchor_) {
if (snapshotable() && !HasPages()) {
size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
}
@@ -2652,7 +2686,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Try to expand the space and allocate in the new next page.
if (Expand()) {
DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
DCHECK((CountTotalPages() > 1) ||
(size_in_bytes <= free_list_.available()));
return free_list_.Allocate(size_in_bytes);
}
......
@@ -91,13 +91,13 @@ class Isolate;
#define DCHECK_MAP_PAGE_INDEX(index) \
DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class CompactionSpace;
class FreeList;
class MemoryAllocator;
class MemoryChunk;
class PagedSpace;
class Space;
class MarkBit {
public:
@@ -1466,6 +1466,16 @@ class AllocationStats BASE_EMBEDDED {
waste_ += size_in_bytes;
}
// Merge {other} into {this}.
void Merge(const AllocationStats& other) {
capacity_ += other.capacity_;
size_ += other.size_;
waste_ += other.waste_;
if (other.max_capacity_ > max_capacity_) {
max_capacity_ = other.max_capacity_;
}
}
private:
intptr_t capacity_;
intptr_t max_capacity_;
@@ -1696,7 +1706,7 @@ class PagedSpace : public Space {
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
virtual ~PagedSpace() {}
virtual ~PagedSpace() { TearDown(); }
// Set up the space using the given address range of virtual memory (from
// the memory allocator's initial chunk) if possible. If the block of
@@ -1708,10 +1718,6 @@ class PagedSpace : public Space {
// subsequently torn down.
bool HasBeenSetUp();
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
inline bool Contains(HeapObject* o);
@@ -1926,9 +1932,47 @@ class PagedSpace : public Space {
bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
FreeList* free_list() { return &free_list_; }
bool HasPages() { return anchor_.next_page() != &anchor_; }
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS, or if the hard heap
// size limit has been hit.
bool Expand();
// Generic fast case allocation function that tries linear allocation at the
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);
// Generic fast case allocation function that tries aligned linear allocation
// at the address denoted by top in allocation_info_. Writes the aligned
// allocation size, which includes the filler size, to size_in_bytes.
inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
// allocation.
MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
int area_size_;
// Accounting information for this space.
@@ -1958,30 +2002,6 @@ class PagedSpace : public Space {
// If not used, the emergency memory is released after compaction.
MemoryChunk* emergency_memory_;
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS, or if the hard heap
// size limit has been hit.
bool Expand();
// Generic fast case allocation function that tries linear allocation at the
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);
// Generic fast case allocation function that tries aligned linear allocation
// at the address denoted by top in allocation_info_. Writes the aligned
// allocation size, which includes the filler size, to size_in_bytes.
inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
// allocation.
MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
friend class MarkCompactCollector;
};
@@ -2657,6 +2677,19 @@ class NewSpace : public Space {
friend class SemiSpaceIterator;
};
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
class CompactionSpace : public PagedSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
: PagedSpace(heap, id, executable) {}
protected:
// The space is temporary and not included in any snapshots.
virtual bool snapshotable() { return false; }
};
// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)
......
@@ -305,40 +305,44 @@ TEST(MemoryAllocator) {
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator != nullptr);
CHECK(memory_allocator->SetUp(heap->MaxReserved(),
heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
int total_pages = 0;
OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage(
faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
{
int total_pages = 0;
OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage(
faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
CHECK(first_page->next_page() == faked_space.anchor());
total_pages++;
for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
CHECK(p->owner() == &faked_space);
}
first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
CHECK(first_page->next_page() == faked_space.anchor());
total_pages++;
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(faked_space.AreaSize(),
&faked_space, NOT_EXECUTABLE);
CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);
int page_count = 0;
for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
CHECK(p->owner() == &faked_space);
page_count++;
}
CHECK(total_pages == page_count);
for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
CHECK(p->owner() == &faked_space);
}
Page* second_page = first_page->next_page();
CHECK(second_page->is_valid());
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(
faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);
int page_count = 0;
for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
CHECK(p->owner() == &faked_space);
page_count++;
// OldSpace's destructor will tear down the space and free up all pages.
}
CHECK(total_pages == page_count);
Page* second_page = first_page->next_page();
CHECK(second_page->is_valid());
memory_allocator->Free(first_page);
memory_allocator->Free(second_page);
memory_allocator->TearDown();
delete memory_allocator;
}
@@ -388,13 +392,55 @@ TEST(OldSpace) {
s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
}
s->TearDown();
delete s;
memory_allocator->TearDown();
delete memory_allocator;
}
TEST(CompactionSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator != nullptr);
CHECK(
memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
CompactionSpace* compaction_space =
new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
CHECK(compaction_space != NULL);
CHECK(compaction_space->SetUp());
OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
CHECK(old_space != NULL);
CHECK(old_space->SetUp());
// Cannot loop until "Available()" since we initially have 0 bytes available
// and would thus neither grow, nor be able to allocate an object.
const int kNumObjects = 100;
const int kExpectedPages = kNumObjects;
for (int i = 0; i < kNumObjects; i++) {
compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
.ToObjectChecked();
}
int pages_in_old_space = old_space->CountTotalPages();
int pages_in_compaction_space = compaction_space->CountTotalPages();
CHECK_EQ(pages_in_compaction_space, kExpectedPages);
CHECK_LE(pages_in_old_space, 1);
old_space->MergeCompactionSpace(compaction_space);
CHECK_EQ(old_space->CountTotalPages(),
pages_in_old_space + pages_in_compaction_space);
delete compaction_space;
delete old_space;
memory_allocator->TearDown();
delete memory_allocator;
}
TEST(LargeObjectSpace) {
v8::V8::Initialize();
......