Commit 055d4ee1 authored by hpayer, committed by Commit bot

Clean-up aligned allocation logic.

BUG=

Review URL: https://codereview.chromium.org/1138643005

Cr-Commit-Position: refs/heads/master@{#28430}
parent 2b17c752
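The pattern this clean-up converges on, visible in every hunk below, is that each space exposes AllocateRawUnaligned and AllocateRawAligned, and callers go through a single AllocateRaw(size_in_bytes, alignment) front door that chooses between them. On 64-bit hosts word alignment already implies double alignment, so the front door always takes the unaligned path there, which is why the per-call-site #ifdef blocks can be deleted. What follows is a minimal standalone sketch of that dispatch, not V8 code: ToySpace, its bump-pointer buffer, and EvacuateSketch are made-up stand-ins, while the enum values, method names, and the V8_HOST_ARCH_32_BIT guard mirror the change below.

// toy_space_sketch.cc -- illustrative only; build with -DV8_HOST_ARCH_32_BIT
// to exercise the aligned path.
#include <cstdint>
#include <cstdio>

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

class ToySpace {
 public:
  // Plain bump-pointer allocation, the analogue of AllocateRawUnaligned.
  uint8_t* AllocateRawUnaligned(int size_in_bytes) {
    uint8_t* result = top_;
    top_ += size_in_bytes;
    return result;
  }

  // Skips filler bytes until the allocation start is 8-byte aligned, the
  // analogue of AllocateRawAligned.
  uint8_t* AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment) {
    while (reinterpret_cast<uintptr_t>(top_) % 8 != 0) ++top_;
    return AllocateRawUnaligned(size_in_bytes);
  }

  // The new front door: the caller states the alignment it needs and the
  // space decides whether any extra work is required.
  uint8_t* AllocateRaw(int size_in_bytes, AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
    return alignment == kDoubleAligned
               ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
               : AllocateRawUnaligned(size_in_bytes);
#else
    // 64-bit: every word-aligned address is already double aligned.
    return AllocateRawUnaligned(size_in_bytes);
#endif
  }

 private:
  uint8_t buffer_[256];
  uint8_t* top_ = buffer_;
};

// Caller-side shape used by the GC paths in this CL: compute the alignment
// once, then pass it down instead of branching on the host architecture.
uint8_t* EvacuateSketch(ToySpace* space, bool needs_double_alignment,
                        int size_in_bytes) {
  AllocationAlignment alignment =
      needs_double_alignment ? kDoubleAligned : kWordAligned;
  return space->AllocateRaw(size_in_bytes, alignment);
}

int main() {
  ToySpace space;
  uint8_t* a = EvacuateSketch(&space, false, 12);
  uint8_t* b = EvacuateSketch(&space, true, 16);
  std::printf("a=%p b=%p\n", static_cast<void*>(a), static_cast<void*>(b));
}

On a 64-bit build the alignment argument is effectively ignored, which is exactly why the old #ifdef blocks at every call site in the hunks below could be removed.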
@@ -173,15 +173,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
   HeapObject* object;
   AllocationResult allocation;
   if (NEW_SPACE == space) {
-#ifndef V8_HOST_ARCH_64_BIT
-    if (alignment == kWordAligned) {
-      allocation = new_space_.AllocateRaw(size_in_bytes);
-    } else {
-      allocation = new_space_.AllocateRawAligned(size_in_bytes, alignment);
-    }
-#else
-    allocation = new_space_.AllocateRaw(size_in_bytes);
-#endif
+    allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
     if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
       space = retry_space;
     } else {
@@ -193,18 +185,10 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
   }
   if (OLD_SPACE == space) {
-#ifndef V8_HOST_ARCH_64_BIT
-    if (alignment == kWordAligned) {
-      allocation = old_space_->AllocateRaw(size_in_bytes);
-    } else {
-      allocation = old_space_->AllocateRawAligned(size_in_bytes, alignment);
-    }
-#else
-    allocation = old_space_->AllocateRaw(size_in_bytes);
-#endif
+    allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
   } else if (CODE_SPACE == space) {
     if (size_in_bytes <= code_space()->AreaSize()) {
-      allocation = code_space_->AllocateRaw(size_in_bytes);
+      allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
     } else {
       // Large code objects are allocated in large object space.
       allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
@@ -213,7 +197,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
     allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
   } else {
     DCHECK(MAP_SPACE == space);
-    allocation = map_space_->AllocateRaw(size_in_bytes);
+    allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
   }
   if (allocation.To(&object)) {
     OnAllocationEvent(object, size_in_bytes);
@@ -1036,9 +1036,9 @@ bool Heap::ReserveSpace(Reservation* reservations) {
         DCHECK_LE(size, MemoryAllocator::PageAreaSize(
                             static_cast<AllocationSpace>(space)));
         if (space == NEW_SPACE) {
-          allocation = new_space()->AllocateRaw(size);
+          allocation = new_space()->AllocateRawUnaligned(size);
         } else {
-          allocation = paged_space(space)->AllocateRaw(size);
+          allocation = paged_space(space)->AllocateRawUnaligned(size);
         }
         HeapObject* free_space;
         if (allocation.To(&free_space)) {
@@ -2146,17 +2146,10 @@ class ScavengingVisitor : public StaticVisitorBase {
     Heap* heap = map->GetHeap();
     DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
-    AllocationResult allocation;
-#ifdef V8_HOST_ARCH_32_BIT
-    if (alignment == kDoubleAlignment) {
-      allocation =
-          heap->new_space()->AllocateRawAligned(object_size, kDoubleAligned);
-    } else {
-      allocation = heap->new_space()->AllocateRaw(object_size);
-    }
-#else
-    allocation = heap->new_space()->AllocateRaw(object_size);
-#endif
+    AllocationAlignment align =
+        alignment == kDoubleAlignment ? kDoubleAligned : kWordAligned;
+    AllocationResult allocation =
+        heap->new_space()->AllocateRaw(object_size, align);
     HeapObject* target = NULL;  // Initialization to please compiler.
     if (allocation.To(&target)) {
@@ -2183,17 +2176,10 @@ class ScavengingVisitor : public StaticVisitorBase {
                                     HeapObject* object, int object_size) {
     Heap* heap = map->GetHeap();
-    AllocationResult allocation;
-#ifdef V8_HOST_ARCH_32_BIT
-    if (alignment == kDoubleAlignment) {
-      allocation =
-          heap->old_space()->AllocateRawAligned(object_size, kDoubleAligned);
-    } else {
-      allocation = heap->old_space()->AllocateRaw(object_size);
-    }
-#else
-    allocation = heap->old_space()->AllocateRaw(object_size);
-#endif
+    AllocationAlignment align =
+        alignment == kDoubleAlignment ? kDoubleAligned : kWordAligned;
+    AllocationResult allocation =
+        heap->old_space()->AllocateRaw(object_size, align);
     HeapObject* target = NULL;  // Initialization to please compiler.
     if (allocation.To(&target)) {
@@ -1941,16 +1941,10 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
        continue;
      }
-    AllocationResult allocation;
-#ifdef V8_HOST_ARCH_32_BIT
-    if (object->NeedsToEnsureDoubleAlignment()) {
-      allocation = new_space->AllocateRawAligned(size, kDoubleAligned);
-    } else {
-      allocation = new_space->AllocateRaw(size);
-    }
-#else
-    allocation = new_space->AllocateRaw(size);
-#endif
+    AllocationAlignment alignment = object->NeedsToEnsureDoubleAlignment()
+                                        ? kDoubleAligned
+                                        : kWordAligned;
+    AllocationResult allocation = new_space->AllocateRaw(size, alignment);
     if (allocation.IsRetry()) {
       if (!new_space->AddFreshPage()) {
         // Shouldn't happen. We are sweeping linearly, and to-space
@@ -1958,15 +1952,7 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
         // always room.
         UNREACHABLE();
       }
-#ifdef V8_HOST_ARCH_32_BIT
-      if (object->NeedsToEnsureDoubleAlignment()) {
-        allocation = new_space->AllocateRawAligned(size, kDoubleAligned);
-      } else {
-        allocation = new_space->AllocateRaw(size);
-      }
-#else
-      allocation = new_space->AllocateRaw(size);
-#endif
+      allocation = new_space->AllocateRaw(size, alignment);
       DCHECK(!allocation.IsRetry());
     }
     Object* target = allocation.ToObjectChecked();
@@ -3119,16 +3105,9 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
   OldSpace* old_space = heap()->old_space();
   HeapObject* target;
-  AllocationResult allocation;
-#ifdef V8_HOST_ARCH_32_BIT
-  if (object->NeedsToEnsureDoubleAlignment()) {
-    allocation = old_space->AllocateRawAligned(object_size, kDoubleAligned);
-  } else {
-    allocation = old_space->AllocateRaw(object_size);
-  }
-#else
-  allocation = old_space->AllocateRaw(object_size);
-#endif
+  AllocationAlignment alignment =
+      object->NeedsToEnsureDoubleAlignment() ? kDoubleAligned : kWordAligned;
+  AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
   if (allocation.To(&target)) {
     MigrateObject(target, object, object_size, old_space->identity());
     heap()->IncrementPromotedObjectsSize(object_size);
@@ -3352,13 +3331,16 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
       int size = object->Size();
+      AllocationAlignment alignment = object->NeedsToEnsureDoubleAlignment()
+                                          ? kDoubleAligned
+                                          : kWordAligned;
       HeapObject* target_object;
-      AllocationResult allocation = space->AllocateRaw(size);
+      AllocationResult allocation = space->AllocateRaw(size, alignment);
       if (!allocation.To(&target_object)) {
         // If allocation failed, use emergency memory and re-try allocation.
         CHECK(space->HasEmergencyMemory());
         space->UseEmergencyMemory();
-        allocation = space->AllocateRaw(size);
+        allocation = space->AllocateRaw(size, alignment);
       }
       if (!allocation.To(&target_object)) {
         // OS refused to give us memory.
@@ -277,7 +277,7 @@ HeapObject* PagedSpace::AllocateLinearlyAligned(int size_in_bytes,
 // Raw allocation.
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
   HeapObject* object = AllocateLinearly(size_in_bytes);
   if (object == NULL) {
@@ -325,6 +325,18 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
 }
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
+                                         AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+  return alignment == kDoubleAligned
+             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+             : AllocateRawUnaligned(size_in_bytes);
+#else
+  return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
 // -----------------------------------------------------------------------------
 // NewSpace
@@ -368,7 +380,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
 }
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
   Address old_top = allocation_info_.top();
   if (allocation_info_.limit() - old_top < size_in_bytes) {
@@ -386,6 +398,18 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
 }
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+                                       AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+  return alignment == kDoubleAligned
+             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+             : AllocateRawUnaligned(size_in_bytes);
+#else
+  return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
   return static_cast<LargePage*>(chunk);
@@ -1478,7 +1478,7 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
       return AllocateRawAligned(size_in_bytes, kDoubleAligned);
     else if (alignment == kDoubleUnaligned)
       return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
-    return AllocateRaw(size_in_bytes);
+    return AllocateRawUnaligned(size_in_bytes);
   } else if (AddFreshPage()) {
     // Switched to new page. Try allocating again.
     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
@@ -1489,7 +1489,7 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
       return AllocateRawAligned(size_in_bytes, kDoubleAligned);
     else if (alignment == kDoubleUnaligned)
       return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
-    return AllocateRaw(size_in_bytes);
+    return AllocateRawUnaligned(size_in_bytes);
   } else {
     return AllocationResult::Retry();
   }
@@ -1764,13 +1764,19 @@ class PagedSpace : public Space {
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
-  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
+      int size_in_bytes);
   // Allocate the requested number of bytes in the space double aligned if
   // possible, return a failure object if not.
   MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
       int size_in_bytes, AllocationAlignment alignment);
+  // Allocate the requested number of bytes in the space and consider allocation
+  // alignment if needed.
+  MUST_USE_RESULT inline AllocationResult AllocateRaw(
+      int size_in_bytes, AllocationAlignment alignment);
   // Give a block of memory to the space's free list. It might be added to
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
@@ -2501,7 +2507,11 @@ class NewSpace : public Space {
   MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
       int size_in_bytes, AllocationAlignment alignment));
-  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
+  MUST_USE_RESULT INLINE(
+      AllocationResult AllocateRawUnaligned(int size_in_bytes));
+  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
+      int size_in_bytes, AllocationAlignment alignment));
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
@@ -504,8 +504,8 @@ static inline void ExpectUndefined(const char* code) {
 // Helper function that simulates a full new-space in the heap.
 static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
-  v8::internal::AllocationResult allocation =
-      space->AllocateRaw(v8::internal::Page::kMaxRegularHeapObjectSize);
+  v8::internal::AllocationResult allocation = space->AllocateRawUnaligned(
+      v8::internal::Page::kMaxRegularHeapObjectSize);
   if (allocation.IsRetry()) return false;
   v8::internal::HeapObject* free_space = NULL;
   CHECK(allocation.To(&free_space));
@@ -524,7 +524,7 @@ static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
   int new_linear_size = space_remaining - extra_bytes;
   if (new_linear_size == 0) return;
   v8::internal::AllocationResult allocation =
-      space->AllocateRaw(new_linear_size);
+      space->AllocateRawUnaligned(new_linear_size);
   v8::internal::HeapObject* free_space = NULL;
   CHECK(allocation.To(&free_space));
   space->heap()->CreateFillerObjectAt(free_space->address(), new_linear_size);
@@ -3946,7 +3946,8 @@ TEST(Regress169928) {
   // We need filler the size of AllocationMemento object, plus an extra
   // fill pointer value.
   HeapObject* obj = NULL;
-  AllocationResult allocation = CcTest::heap()->new_space()->AllocateRaw(
+  AllocationResult allocation =
+      CcTest::heap()->new_space()->AllocateRawUnaligned(
       AllocationMemento::kSize + kPointerSize);
   CHECK(allocation.To(&obj));
   Address addr_obj = obj->address();
@@ -358,8 +358,9 @@ TEST(NewSpace) {
   CHECK(new_space.HasBeenSetUp());
   while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
-    Object* obj = new_space.AllocateRaw(
-        Page::kMaxRegularHeapObjectSize).ToObjectChecked();
+    Object* obj =
+        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
+            .ToObjectChecked();
     CHECK(new_space.Contains(HeapObject::cast(obj)));
   }
@@ -384,7 +385,7 @@ TEST(OldSpace) {
   CHECK(s->SetUp());
   while (s->Available() > 0) {
-    s->AllocateRaw(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
+    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
   }
   s->TearDown();
@@ -485,7 +486,8 @@ UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
     // Try to allocate out of the new space. A new page should be added and
     // the
     // allocation should succeed.
-    v8::internal::AllocationResult allocation = new_space->AllocateRaw(80);
+    v8::internal::AllocationResult allocation =
+        new_space->AllocateRawUnaligned(80);
     CHECK(!allocation.IsRetry());
     CHECK(new_space->CommittedMemory() == 2 * Page::kPageSize);