Commit a4e3a3b6 authored by mlippautz, committed by Commit bot

[heap] Move to LAB-based allocation for newspace evacuation.

This CL prepares new space evacuation for parallel execution with respect to the
actual allocations. The allocation priority is as follows (a condensed toy sketch
follows the list):
* Try to allocate from the LAB if the object is below kMaxLabObjectSize.
* Allocate directly (synchronized) from new space for larger objects.
* Fall back to old space allocation (which will be backed by a local compaction
  space in the future).
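
As a rough illustration of this priority (see also AllocateTargetObject in the
mark-compact.cc diff below), here is a self-contained toy model. It is a sketch
only: ToyHeap, Lab and AllocateTarget are invented stand-ins, not V8 APIs, and
details such as alignment, sticky bailout modes and the fatal OOM on old space
failure are omitted.

  // Toy model only -- not V8 code.
  #include <cstdint>
  #include <mutex>
  #include <optional>

  constexpr int kLabSize = 4 * 1024;
  constexpr int kMaxLabObjectSize = 256;

  struct Lab {  // stand-in for LocalAllocationBuffer
    uintptr_t top = 0, limit = 0;
    bool Allocate(int size, uintptr_t* out) {
      if (top + size > limit) return false;
      *out = top;
      top += size;
      return true;
    }
  };

  struct ToyHeap {
    std::mutex new_space_mutex;  // stand-in for the synchronized new space path
    uintptr_t new_top = 0x1000, new_limit = 0x9000;
    uintptr_t old_top = 0x100000, old_limit = 0x200000;

    std::optional<uintptr_t> AllocateInNewSpace(int size) {
      std::lock_guard<std::mutex> guard(new_space_mutex);
      if (new_top + size > new_limit) return std::nullopt;
      uintptr_t result = new_top;
      new_top += size;
      return result;
    }
    std::optional<uintptr_t> AllocateInOldSpace(int size) {
      if (old_top + size > old_limit) return std::nullopt;  // real code: fatal OOM
      uintptr_t result = old_top;
      old_top += size;
      return result;
    }
  };

  // Priority: (1) LAB for small objects, (2) synchronized new space for large
  // objects or to refill the LAB, (3) old space as the final fallback.
  std::optional<uintptr_t> AllocateTarget(ToyHeap* heap, Lab* lab, int size) {
    uintptr_t addr;
    if (size <= kMaxLabObjectSize) {
      if (lab->Allocate(size, &addr)) return addr;
      if (auto block = heap->AllocateInNewSpace(kLabSize)) {
        *lab = Lab{*block, *block + kLabSize};
        if (lab->Allocate(size, &addr)) return addr;
      }
    } else if (auto direct = heap->AllocateInNewSpace(size)) {
      return direct;
    }
    return heap->AllocateInOldSpace(size);
  }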

Semantic change: Previously we fell back to regular new space promotion when old
space was OOM. With this CL we fall back to new space promotion, which can fail
because of fragmentation, again leading to an old space allocation that finally
bails into OOM.

New space evacuation is still single-threaded and requires further changes to
allocation site tracking.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1487853002

Cr-Commit-Position: refs/heads/master@{#32970}
parent 7bc8fac4
src/heap/mark-compact.cc

@@ -1607,12 +1607,17 @@ class MarkCompactCollector::EvacuateVisitorBase
 };
 
-class MarkCompactCollector::EvacuateNewSpaceVisitor
+class MarkCompactCollector::EvacuateNewSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
+  static const intptr_t kLabSize = 4 * KB;
+  static const intptr_t kMaxLabObjectSize = 256;
+
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    SlotsBuffer** evacuation_slots_buffer)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer) {}
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+        buffer_(LocalAllocationBuffer::InvalidBuffer()),
+        space_to_allocate_(NEW_SPACE) {}
 
   bool Visit(HeapObject* object) override {
     Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
@@ -1628,34 +1633,119 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor
       heap_->IncrementPromotedObjectsSize(size);
       return true;
     }
+    HeapObject* target = nullptr;
+    AllocationSpace space = AllocateTargetObject(object, &target);
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, space,
+        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
 
-    AllocationAlignment alignment = object->RequiredAlignment();
-    AllocationResult allocation =
-        heap_->new_space()->AllocateRaw(size, alignment);
-    if (allocation.IsRetry()) {
-      if (!heap_->new_space()->AddFreshPage()) {
-        // Shouldn't happen. We are sweeping linearly, and to-space
-        // has the same number of pages as from-space, so there is
-        // always room unless we are in an OOM situation.
-        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-      }
-      allocation = heap_->new_space()->AllocateRaw(size, alignment);
-      DCHECK(!allocation.IsRetry());
-    }
-    Object* target = allocation.ToObjectChecked();
-
-    heap_->mark_compact_collector()->MigrateObject(
-        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-    }
-    heap_->IncrementSemiSpaceCopiedObjectSize(size);
-    return true;
-  }
+ private:
+  enum NewSpaceAllocationMode {
+    kNonstickyBailoutOldSpace,
+    kStickyBailoutOldSpace,
+  };
+
+  inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+                                              HeapObject** target_object) {
+    const int size = old_object->Size();
+    AllocationAlignment alignment = old_object->RequiredAlignment();
+    AllocationResult allocation;
+    if (space_to_allocate_ == NEW_SPACE) {
+      if (size > kMaxLabObjectSize) {
+        allocation =
+            AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
+      } else {
+        allocation = AllocateInLab(size, alignment);
+      }
+    }
+    if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
+      allocation = AllocateInOldSpace(size, alignment);
+    }
+    bool ok = allocation.To(target_object);
+    DCHECK(ok);
+    USE(ok);
+    return space_to_allocate_;
+  }
+
+  inline bool NewLocalAllocationBuffer() {
+    AllocationResult result =
+        AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
+    LocalAllocationBuffer saved_old_buffer = buffer_;
+    buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+    if (buffer_.IsValid()) {
+      buffer_.TryMerge(&saved_old_buffer);
+      return true;
+    }
+    return false;
+  }
+
+  inline AllocationResult AllocateInNewSpace(int size_in_bytes,
+                                             AllocationAlignment alignment,
+                                             NewSpaceAllocationMode mode) {
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPageSynchronized()) {
+        if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+      } else {
+        allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
+                                                                 alignment);
+        if (allocation.IsRetry()) {
+          if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+        }
+      }
+    }
+    return allocation;
+  }
+
+  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
+                                             AllocationAlignment alignment) {
+    AllocationResult allocation =
+        heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      FatalProcessOutOfMemory(
+          "MarkCompactCollector: semi-space copy, fallback in old gen\n");
+    }
+    return allocation;
+  }
+
+  inline AllocationResult AllocateInLab(int size_in_bytes,
+                                        AllocationAlignment alignment) {
+    AllocationResult allocation;
+    if (!buffer_.IsValid()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      }
+    }
+    allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      } else {
+        allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+        if (allocation.IsRetry()) {
+          space_to_allocate_ = OLD_SPACE;
+          return AllocationResult::Retry(OLD_SPACE);
+        }
+      }
+    }
+    return allocation;
+  }
+
+  LocalAllocationBuffer buffer_;
+  AllocationSpace space_to_allocate_;
 };
 
-class MarkCompactCollector::EvacuateOldSpaceVisitor
+class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
...
src/heap/spaces-inl.h

@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_SPACES_INL_H_
 #define V8_HEAP_SPACES_INL_H_
 
+#include "src/heap/incremental-marking.h"
 #include "src/heap/spaces.h"
 #include "src/isolate.h"
 #include "src/msan.h"
@@ -49,20 +50,21 @@ Page* PageIterator::next() {
 // SemiSpaceIterator
 
 HeapObject* SemiSpaceIterator::Next() {
-  if (current_ == limit_) return NULL;
-  if (NewSpacePage::IsAtEnd(current_)) {
-    NewSpacePage* page = NewSpacePage::FromLimit(current_);
-    page = page->next_page();
-    DCHECK(!page->is_anchor());
-    current_ = page->area_start();
-    if (current_ == limit_) return NULL;
+  while (current_ != limit_) {
+    if (NewSpacePage::IsAtEnd(current_)) {
+      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+      page = page->next_page();
+      DCHECK(!page->is_anchor());
+      current_ = page->area_start();
+      if (current_ == limit_) return nullptr;
+    }
+    HeapObject* object = HeapObject::FromAddress(current_);
+    current_ += object->Size();
+    if (!object->IsFiller()) {
+      return object;
+    }
   }
-
-  HeapObject* object = HeapObject::FromAddress(current_);
-  int size = object->Size();
-
-  current_ += size;
-  return object;
+  return nullptr;
 }
@@ -319,6 +321,24 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
 }
 
+AllocationResult LocalAllocationBuffer::AllocateRawAligned(
+    int size_in_bytes, AllocationAlignment alignment) {
+  Address current_top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+  Address new_top = current_top + filler_size + size_in_bytes;
+  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
+
+  allocation_info_.set_top(new_top);
+  if (filler_size > 0) {
+    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+                                    filler_size);
+  }
+
+  return AllocationResult(HeapObject::FromAddress(current_top));
+}
+
 HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                 AllocationAlignment alignment) {
   Address current_top = allocation_info_.top();
@@ -482,6 +502,13 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
 }
 
+MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+    int size_in_bytes, AllocationAlignment alignment) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  return AllocateRaw(size_in_bytes, alignment);
+}
+
 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
   return static_cast<LargePage*>(chunk);
@@ -492,6 +519,34 @@ intptr_t LargeObjectSpace::Available() {
   return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
 }
 
+LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
+  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
+}
+
+LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
+                                                        AllocationResult result,
+                                                        intptr_t size) {
+  if (result.IsRetry()) return InvalidBuffer();
+  HeapObject* obj = nullptr;
+  bool ok = result.To(&obj);
+  USE(ok);
+  DCHECK(ok);
+  Address top = HeapObject::cast(obj)->address();
+  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
+}
+
+bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
+  if (allocation_info_.top() == other->allocation_info_.limit()) {
+    allocation_info_.set_top(other->allocation_info_.top());
+    other->allocation_info_.Reset(nullptr, nullptr);
+    return true;
+  }
+  return false;
+}
+
 }  // namespace internal
 }  // namespace v8
...
src/heap/spaces.cc

@@ -958,8 +958,7 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
-  allocation_info_.set_top(NULL);
-  allocation_info_.set_limit(NULL);
+  allocation_info_.Reset(nullptr, nullptr);
 
   anchor_.InitializeAsAnchor(this);
 }
@@ -1248,8 +1247,7 @@ void PagedSpace::ReleasePage(Page* page) {
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
-    allocation_info_.set_top(NULL);
-    allocation_info_.set_limit(NULL);
+    allocation_info_.Reset(nullptr, nullptr);
   }
 
   // If page is still in a list, unlink it from that list.
@@ -1390,8 +1388,8 @@ void NewSpace::TearDown() {
   }
 
   start_ = NULL;
-  allocation_info_.set_top(NULL);
-  allocation_info_.set_limit(NULL);
+  allocation_info_.Reset(nullptr, nullptr);
 
   to_space_.TearDown();
   from_space_.TearDown();
@@ -1480,10 +1478,50 @@ void NewSpace::Shrink() {
 }
 
+void LocalAllocationBuffer::Close() {
+  if (IsValid()) {
+    heap_->CreateFillerObjectAt(
+        allocation_info_.top(),
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+  }
+}
+
+LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
+                                             AllocationInfo allocation_info)
+    : heap_(heap), allocation_info_(allocation_info) {
+  if (IsValid()) {
+    heap_->CreateFillerObjectAt(
+        allocation_info_.top(),
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+  }
+}
+
+LocalAllocationBuffer::LocalAllocationBuffer(
+    const LocalAllocationBuffer& other) {
+  *this = other;
+}
+
+LocalAllocationBuffer& LocalAllocationBuffer::operator=(
+    const LocalAllocationBuffer& other) {
+  Close();
+  heap_ = other.heap_;
+  allocation_info_ = other.allocation_info_;
+
+  // This is needed since we (a) cannot yet use move semantics, (b) want to
+  // make it easy to use the class as a value, and (c) implicitly call {Close}
+  // upon copy.
+  const_cast<LocalAllocationBuffer&>(other)
+      .allocation_info_.Reset(nullptr, nullptr);
+  return *this;
+}
+
 void NewSpace::UpdateAllocationInfo() {
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  allocation_info_.set_top(to_space_.page_low());
-  allocation_info_.set_limit(to_space_.page_high());
+  allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
   UpdateInlineAllocationLimit(0);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
@@ -1566,6 +1604,12 @@ bool NewSpace::AddFreshPage() {
 }
 
+bool NewSpace::AddFreshPageSynchronized() {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  return AddFreshPage();
+}
+
 bool NewSpace::EnsureAllocation(int size_in_bytes,
                                 AllocationAlignment alignment) {
   Address old_top = allocation_info_.top();
@@ -2763,9 +2807,7 @@ void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
     int remaining =
         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
     heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
-
-    allocation_info_.set_top(nullptr);
-    allocation_info_.set_limit(nullptr);
+    allocation_info_.Reset(nullptr, nullptr);
   }
 }
...
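
Aside on the copy assignment above: as the comment notes, move semantics were
not yet allowed by the style guide. Purely as a hypothetical illustration (not
part of this CL, and using simplified stand-in types rather than the real Heap
and AllocationInfo), the same close-on-reassignment behavior could later be
expressed with a move assignment:

  #include <utility>

  struct ToyAllocationInfo {  // stand-in for AllocationInfo
    char* top = nullptr;
    char* limit = nullptr;
  };

  class MovableLab {  // hypothetical move-enabled LocalAllocationBuffer
   public:
    MovableLab() = default;
    MovableLab(MovableLab&& other) noexcept { *this = std::move(other); }
    MovableLab& operator=(MovableLab&& other) noexcept {
      Close();                            // seal the current buffer first
      info_ = other.info_;
      other.info_ = ToyAllocationInfo{};  // the moved-from LAB becomes invalid
      return *this;
    }
    ~MovableLab() { Close(); }
    bool IsValid() const { return info_.top != nullptr; }

   private:
    void Close() {
      // The real implementation creates a filler object for [top, limit).
      info_ = ToyAllocationInfo{};
    }
    ToyAllocationInfo info_;
  };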
src/heap/spaces.h

@@ -1472,7 +1472,13 @@ class PageIterator BASE_EMBEDDED {
 // space.
 class AllocationInfo {
  public:
-  AllocationInfo() : top_(NULL), limit_(NULL) {}
+  AllocationInfo() : top_(nullptr), limit_(nullptr) {}
+  AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+
+  void Reset(Address top, Address limit) {
+    set_top(top);
+    set_limit(limit);
+  }
 
   INLINE(void set_top(Address top)) {
     SLOW_DCHECK(top == NULL ||
@@ -1869,6 +1875,60 @@ class AllocationResult {
 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
 
+// LocalAllocationBuffer represents a linear allocation area that is created
+// from a given {AllocationResult} and can be used to allocate memory without
+// synchronization.
+//
+// The buffer is properly closed upon destruction and reassignment.
+// Example:
+//   {
+//     AllocationResult result = ...;
+//     LocalAllocationBuffer a =
+//         LocalAllocationBuffer::FromResult(heap, result, size);
+//     LocalAllocationBuffer b = a;
+//     CHECK(!a.IsValid());
+//     CHECK(b.IsValid());
+//     // {a} is invalid now and cannot be used for further allocations.
+//   }
+//   // Since {b} went out of scope, the LAB is closed, resulting in creating a
+//   // filler object for the remaining area.
+class LocalAllocationBuffer {
+ public:
+  // Indicates that a buffer cannot be used for allocations anymore. Can result
+  // from either reassigning a buffer, or trying to construct it from an
+  // invalid {AllocationResult}.
+  static inline LocalAllocationBuffer InvalidBuffer();
+
+  // Creates a new LAB from a given {AllocationResult}. Results in
+  // InvalidBuffer if the result indicates a retry.
+  static inline LocalAllocationBuffer FromResult(Heap* heap,
+                                                 AllocationResult result,
+                                                 intptr_t size);
+
+  ~LocalAllocationBuffer() { Close(); }
+
+  // Convert to C++11 move-semantics once allowed by the style guide.
+  LocalAllocationBuffer(const LocalAllocationBuffer& other);
+  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
+
+  MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+      int size_in_bytes, AllocationAlignment alignment);
+
+  inline bool IsValid() { return allocation_info_.top() != nullptr; }
+
+  // Try to merge LABs, which is only possible when they are adjacent in
+  // memory. Returns true if the merge was successful, false otherwise.
+  inline bool TryMerge(LocalAllocationBuffer* other);
+
+ private:
+  LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
+
+  void Close();
+
+  Heap* heap_;
+  AllocationInfo allocation_info_;
+};
+
 class PagedSpace : public Space {
  public:
   static const intptr_t kCompactionMemoryWanted = 500 * KB;
@@ -1999,8 +2059,7 @@ class PagedSpace : public Space {
     DCHECK(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-    allocation_info_.set_top(top);
-    allocation_info_.set_limit(limit);
+    allocation_info_.Reset(top, limit);
   }
 
   // Empty space allocation info, returning unused area to free list.
@@ -2741,6 +2800,9 @@ class NewSpace : public Space {
   MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
       int size_in_bytes, AllocationAlignment alignment));
 
+  MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
+      int size_in_bytes, AllocationAlignment alignment);
+
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
@@ -2790,6 +2852,7 @@ class NewSpace : public Space {
   // are no pages, or the current page is already empty), or true
   // if successful.
   bool AddFreshPage();
+  bool AddFreshPageSynchronized();
 
 #ifdef VERIFY_HEAP
   // Verify the active semispace.
@@ -2833,6 +2896,8 @@ class NewSpace : public Space {
   // Update allocation info to match the current to-space page.
   void UpdateAllocationInfo();
 
+  base::Mutex mutex_;
+
   Address chunk_base_;
   uintptr_t chunk_size_;
...
test/cctest/cctest.gyp

@@ -99,6 +99,7 @@
         'heap/test-compaction.cc',
         'heap/test-heap.cc',
         'heap/test-incremental-marking.cc',
+        'heap/test-lab.cc',
        'heap/test-mark-compact.cc',
        'heap/test-spaces.cc',
        'heap/utils-inl.h',
...

test/cctest/heap/test-lab.cc (new file)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <vector>
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/heap/spaces-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
static Address AllocateLabBackingStore(Heap* heap, intptr_t size_in_bytes) {
AllocationResult result = heap->old_space()->AllocateRaw(
static_cast<int>(size_in_bytes), kDoubleAligned);
Object* obj = result.ToObjectChecked();
Address adr = HeapObject::cast(obj)->address();
return adr;
}
static void VerifyIterable(v8::internal::Address base,
v8::internal::Address limit,
std::vector<intptr_t> expected_size) {
CHECK_LE(reinterpret_cast<intptr_t>(base), reinterpret_cast<intptr_t>(limit));
HeapObject* object = nullptr;
size_t counter = 0;
while (base < limit) {
object = HeapObject::FromAddress(base);
CHECK(object->IsFiller());
CHECK_LT(counter, expected_size.size());
CHECK_EQ(expected_size[counter], object->Size());
base += object->Size();
counter++;
}
}
static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
intptr_t size_in_bytes,
AllocationAlignment alignment = kWordAligned) {
HeapObject* obj;
AllocationResult result =
lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment);
if (result.To(&obj)) {
heap->CreateFillerObjectAt(obj->address(), static_cast<int>(size_in_bytes));
return true;
}
return false;
}
TEST(InvalidLab) {
LocalAllocationBuffer lab = LocalAllocationBuffer::InvalidBuffer();
CHECK(!lab.IsValid());
}
TEST(UnusedLabImplicitClose) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
heap->root(Heap::kOnePointerFillerMapRootIndex);
const int kLabSize = 4 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
intptr_t expected_sizes_raw[1] = {kLabSize};
std::vector<intptr_t> expected_sizes(expected_sizes_raw,
expected_sizes_raw + 1);
{
AllocationResult lab_backing_store(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
}
VerifyIterable(base, limit, expected_sizes);
}
TEST(SimpleAllocate) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 4 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
intptr_t sizes_raw[1] = {128};
intptr_t expected_sizes_raw[2] = {128, kLabSize - 128};
std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 1);
std::vector<intptr_t> expected_sizes(expected_sizes_raw,
expected_sizes_raw + 2);
{
AllocationResult lab_backing_store(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
intptr_t sum = 0;
for (auto size : sizes) {
if (AllocateFromLab(heap, &lab, size)) {
sum += size;
}
}
}
VerifyIterable(base, limit, expected_sizes);
}
TEST(AllocateUntilLabOOM) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 2 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
// The following objects won't fit in {kLabSize}.
intptr_t sizes_raw[5] = {512, 512, 128, 512, 512};
intptr_t expected_sizes_raw[5] = {512, 512, 128, 512, 384 /* left over */};
std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 5);
std::vector<intptr_t> expected_sizes(expected_sizes_raw,
expected_sizes_raw + 5);
intptr_t sum = 0;
{
AllocationResult lab_backing_store(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
for (auto size : sizes) {
if (AllocateFromLab(heap, &lab, size)) {
sum += size;
}
}
CHECK_EQ(kLabSize - sum, 384);
}
VerifyIterable(base, limit, expected_sizes);
}
TEST(AllocateExactlyUntilLimit) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 2 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
intptr_t sizes_raw[4] = {512, 512, 512, 512};
intptr_t expected_sizes_raw[5] = {512, 512, 512, 512, 0};
std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 4);
std::vector<intptr_t> expected_sizes(expected_sizes_raw,
expected_sizes_raw + 5);
{
AllocationResult lab_backing_store(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
intptr_t sum = 0;
for (auto size : sizes) {
if (AllocateFromLab(heap, &lab, size)) {
sum += size;
} else {
break;
}
}
CHECK_EQ(kLabSize - sum, 0);
}
VerifyIterable(base, limit, expected_sizes);
}
TEST(MergeSuccessful) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 2 * KB;
Address base1 = AllocateLabBackingStore(heap, kLabSize);
Address limit1 = base1 + kLabSize;
Address base2 = limit1;
Address limit2 = base2 + kLabSize;
intptr_t sizes1_raw[4] = {512, 512, 512, 256};
intptr_t expected_sizes1_raw[5] = {512, 512, 512, 256, 256};
std::vector<intptr_t> sizes1(sizes1_raw, sizes1_raw + 4);
std::vector<intptr_t> expected_sizes1(expected_sizes1_raw,
expected_sizes1_raw + 5);
intptr_t sizes2_raw[5] = {256, 512, 512, 512, 512};
intptr_t expected_sizes2_raw[10] = {512, 512, 512, 256, 256,
512, 512, 512, 512, 0};
std::vector<intptr_t> sizes2(sizes2_raw, sizes2_raw + 5);
std::vector<intptr_t> expected_sizes2(expected_sizes2_raw,
expected_sizes2_raw + 10);
{
AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
LocalAllocationBuffer lab1 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
CHECK(lab1.IsValid());
intptr_t sum = 0;
for (auto size : sizes1) {
if (AllocateFromLab(heap, &lab1, size)) {
sum += size;
} else {
break;
}
}
AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
LocalAllocationBuffer lab2 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
CHECK(lab2.IsValid());
CHECK(lab2.TryMerge(&lab1));
CHECK(!lab1.IsValid());
for (auto size : sizes2) {
if (AllocateFromLab(heap, &lab2, size)) {
sum += size;
} else {
break;
}
}
CHECK_EQ(2 * kLabSize - sum, 0);
}
VerifyIterable(base1, limit1, expected_sizes1);
VerifyIterable(base1, limit2, expected_sizes2);
}
TEST(MergeFailed) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 2 * KB;
Address base1 = AllocateLabBackingStore(heap, kLabSize);
Address base2 = base1 + kLabSize;
Address base3 = base2 + kLabSize;
{
AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
LocalAllocationBuffer lab1 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
CHECK(lab1.IsValid());
AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
LocalAllocationBuffer lab2 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
CHECK(lab2.IsValid());
AllocationResult lab_backing_store3(HeapObject::FromAddress(base3));
LocalAllocationBuffer lab3 =
LocalAllocationBuffer::FromResult(heap, lab_backing_store3, kLabSize);
CHECK(lab3.IsValid());
CHECK(!lab3.TryMerge(&lab1));
}
}
#ifdef V8_HOST_ARCH_32_BIT
TEST(AllocateAligned) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 2 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
std::pair<intptr_t, AllocationAlignment> sizes_raw[2] = {
std::make_pair(116, kWordAligned), std::make_pair(64, kDoubleAligned)};
std::vector<std::pair<intptr_t, AllocationAlignment>> sizes(sizes_raw,
sizes_raw + 2);
intptr_t expected_sizes_raw[4] = {116, 4, 64, 1864};
std::vector<intptr_t> expected_sizes(expected_sizes_raw,
expected_sizes_raw + 4);
{
AllocationResult lab_backing_store(HeapObject::FromAddress(base));
LocalAllocationBuffer lab =
LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
CHECK(lab.IsValid());
for (auto pair : sizes) {
if (!AllocateFromLab(heap, &lab, pair.first, pair.second)) {
break;
}
}
}
VerifyIterable(base, limit, expected_sizes);
}
#endif // V8_HOST_ARCH_32_BIT
} // namespace internal
} // namespace v8