Commit fe0e49df authored by hpayer, committed by Commit bot

Move double alignment logic into memory allocator.

BUG=chromium:436911

Review URL: https://codereview.chromium.org/1127993002

Cr-Commit-Position: refs/heads/master@{#28262}
parent 6618793e
@@ -1941,24 +1941,19 @@ STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
                kDoubleAlignmentMask) == 0);  // NOLINT
 
-INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
-                                              int size));
-static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
-                                       int size) {
+HeapObject* Heap::EnsureDoubleAligned(HeapObject* object, int size) {
   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
-    heap->CreateFillerObjectAt(object->address(), kPointerSize);
+    CreateFillerObjectAt(object->address(), kPointerSize);
     return HeapObject::FromAddress(object->address() + kPointerSize);
   } else {
-    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
-                               kPointerSize);
+    CreateFillerObjectAt(object->address() + size - kPointerSize, kPointerSize);
     return object;
   }
 }
 
 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
-  return EnsureDoubleAligned(this, object, size);
+  return EnsureDoubleAligned(object, size);
 }
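The filler trick above can be illustrated outside of V8. A minimal standalone sketch, assuming a 32-bit build (4-byte words, 8-byte double alignment); the constant values and the function name below are illustrative stand-ins, not the real V8 definitions:

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins for the V8 constants on a 32-bit build.
    static const uintptr_t kPointerSize = 4;
    static const uintptr_t kDoubleAlignmentMask = 8 - 1;

    // Mirrors the shape of Heap::EnsureDoubleAligned: the caller over-allocated
    // by one word, so a double-aligned start address always exists and the
    // spare word becomes a filler either before or after the object.
    uintptr_t DoubleAlign(uintptr_t address, uintptr_t size) {
      if ((address & kDoubleAlignmentMask) != 0) {
        // Misaligned start: skip one word, the filler goes in front.
        return address + kPointerSize;
      }
      // Already aligned: the object stays put and the spare word at
      // address + size - kPointerSize is left for a trailing filler.
      (void)size;
      return address;
    }

    int main() {
      // 0x1004 is word-aligned but not double-aligned: the object shifts up.
      printf("0x%lx\n", (unsigned long)DoubleAlign(0x1004, 20));  // 0x1008
      // 0x1008 is already double-aligned: the object stays where it is.
      printf("0x%lx\n", (unsigned long)DoubleAlign(0x1008, 20));  // 0x1008
      return 0;
    }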
@@ -2108,15 +2103,13 @@ class ScavengingVisitor : public StaticVisitorBase {
                                         HeapObject* object, int object_size) {
     Heap* heap = map->GetHeap();
 
-    int allocation_size = object_size;
-    if (alignment != kObjectAlignment) {
-      DCHECK(alignment == kDoubleAlignment);
-      allocation_size += kPointerSize;
-    }
-
     DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
-    AllocationResult allocation =
-        heap->new_space()->AllocateRaw(allocation_size);
+    AllocationResult allocation;
+    if (alignment == kDoubleAlignment) {
+      allocation = heap->new_space()->AllocateRawDoubleAligned(object_size);
+    } else {
+      allocation = heap->new_space()->AllocateRaw(object_size);
+    }
 
     HeapObject* target = NULL;  // Initialization to please compiler.
     if (allocation.To(&target)) {
@@ -2126,9 +2119,6 @@ class ScavengingVisitor : public StaticVisitorBase {
       // object.
       heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
 
-      if (alignment != kObjectAlignment) {
-        target = EnsureDoubleAligned(heap, target, allocation_size);
-      }
       MigrateObject(heap, object, target, object_size);
 
       // Update slot to new target.
@@ -2146,20 +2136,15 @@ class ScavengingVisitor : public StaticVisitorBase {
                                         HeapObject* object, int object_size) {
     Heap* heap = map->GetHeap();
 
-    int allocation_size = object_size;
-    if (alignment != kObjectAlignment) {
-      DCHECK(alignment == kDoubleAlignment);
-      allocation_size += kPointerSize;
-    }
-
     AllocationResult allocation;
-    allocation = heap->old_space()->AllocateRaw(allocation_size);
+    if (alignment == kDoubleAlignment) {
+      allocation = heap->old_space()->AllocateRawDoubleAligned(object_size);
+    } else {
+      allocation = heap->old_space()->AllocateRaw(object_size);
+    }
 
     HeapObject* target = NULL;  // Initialization to please compiler.
     if (allocation.To(&target)) {
-      if (alignment != kObjectAlignment) {
-        target = EnsureDoubleAligned(heap, target, allocation_size);
-      }
       MigrateObject(heap, object, target, object_size);
 
       // Update slot to new target.
@@ -3678,7 +3663,7 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
   if (!allocation.To(&object)) return allocation;
 
   if (array_type == kExternalFloat64Array) {
-    object = EnsureDoubleAligned(this, object, size);
+    object = EnsureDoubleAligned(object, size);
   }
 
   object->set_map(MapForFixedTypedArray(array_type));
@@ -4409,7 +4394,7 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
     if (!allocation.To(&object)) return allocation;
   }
 
-  return EnsureDoubleAligned(this, object, size);
+  return EnsureDoubleAligned(object, size);
 }
@@ -4427,7 +4412,7 @@ AllocationResult Heap::AllocateConstantPoolArray(
     AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
     if (!allocation.To(&object)) return allocation;
   }
 
-  object = EnsureDoubleAligned(this, object, size);
+  object = EnsureDoubleAligned(object, size);
   object->set_map_no_write_barrier(constant_pool_array_map());
   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
@@ -4453,7 +4438,7 @@ AllocationResult Heap::AllocateExtendedConstantPoolArray(
     AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
     if (!allocation.To(&object)) return allocation;
   }
 
-  object = EnsureDoubleAligned(this, object, size);
+  object = EnsureDoubleAligned(object, size);
   object->set_map_no_write_barrier(constant_pool_array_map());
   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
...
@@ -714,6 +714,11 @@ class Heap {
   MUST_USE_RESULT AllocationResult
       CopyJSObject(JSObject* source, AllocationSite* site = NULL);
 
+  // This method assumes overallocation of one word. It will store a filler
+  // before the object if the given object is not double aligned, otherwise
+  // it will place the filler after the object.
+  MUST_USE_RESULT HeapObject* EnsureDoubleAligned(HeapObject* object, int size);
+
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
...
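The new comment documents the contract that the remaining direct callers (AllocateRawFixedDoubleArray, AllocateFixedTypedArray and the constant pool allocators above) follow: over-allocate by one word, then hand the raw object to EnsureDoubleAligned. A hedged sketch of that caller pattern; the function name AllocateDoubleAlignedStruct is hypothetical, only the calls mirror the call sites in this patch:

    // Hypothetical caller, shown only to illustrate the documented contract.
    AllocationResult Heap::AllocateDoubleAlignedStruct(int object_size,
                                                       AllocationSpace space) {
      // One word of slack so EnsureDoubleAligned always has room for a filler,
      // wherever the allocation top happened to land.
      int size = object_size + kPointerSize;
      HeapObject* object = NULL;
      {
        AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
        if (!allocation.To(&object)) return allocation;
      }
      // Either skips the leading word (filler in front) or leaves the trailing
      // word as a filler; the result is guaranteed to be double aligned.
      return EnsureDoubleAligned(object, size);
    }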
@@ -250,6 +250,25 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
 }
 
 
+HeapObject* PagedSpace::AllocateLinearlyDoubleAlign(int size_in_bytes) {
+  Address current_top = allocation_info_.top();
+  int alignment_size = 0;
+
+  if ((OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
+    alignment_size = kPointerSize;
+    size_in_bytes += alignment_size;
+  }
+  Address new_top = current_top + size_in_bytes;
+  if (new_top > allocation_info_.limit()) return NULL;
+
+  allocation_info_.set_top(new_top);
+  if (alignment_size > 0)
+    return heap()->EnsureDoubleAligned(HeapObject::FromAddress(current_top),
+                                       size_in_bytes);
+  return HeapObject::FromAddress(current_top);
+}
+
+
 // Raw allocation.
 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
   HeapObject* object = AllocateLinearly(size_in_bytes);
@@ -273,15 +292,69 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
 }
 
 
+// Raw allocation.
+AllocationResult PagedSpace::AllocateRawDoubleAligned(int size_in_bytes) {
+  DCHECK(identity() == OLD_SPACE);
+  HeapObject* object = AllocateLinearlyDoubleAlign(size_in_bytes);
+  int aligned_size_in_bytes = size_in_bytes + kPointerSize;
+
+  if (object == NULL) {
+    object = free_list_.Allocate(aligned_size_in_bytes);
+    if (object == NULL) {
+      object = SlowAllocateRaw(aligned_size_in_bytes);
+    }
+    object = heap()->EnsureDoubleAligned(object, aligned_size_in_bytes);
+  }
+
+  if (object != NULL) {
+    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+    return object;
+  }
+
+  return AllocationResult::Retry(identity());
+}
+
+
 // -----------------------------------------------------------------------------
 // NewSpace
 
 
+AllocationResult NewSpace::AllocateRawDoubleAligned(int size_in_bytes) {
+  Address old_top = allocation_info_.top();
+  int alignment_size = 0;
+  int aligned_size_in_bytes = 0;
+
+  // If double alignment is required and top pointer is not aligned, we allocate
+  // additional memory to take care of the alignment.
+  if ((OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
+    alignment_size += kPointerSize;
+  }
+  aligned_size_in_bytes = size_in_bytes + alignment_size;
+
+  if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
+    return SlowAllocateRaw(size_in_bytes, true);
+  }
+
+  HeapObject* obj = HeapObject::FromAddress(old_top);
+  allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  if (alignment_size > 0) {
+    obj = heap()->EnsureDoubleAligned(obj, aligned_size_in_bytes);
+  }
+
+  // The slow path above ultimately goes through AllocateRaw, so this suffices.
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+
+  return obj;
+}
+
+
 AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
   Address old_top = allocation_info_.top();
 
   if (allocation_info_.limit() - old_top < size_in_bytes) {
-    return SlowAllocateRaw(size_in_bytes);
+    return SlowAllocateRaw(size_in_bytes, false);
   }
 
   HeapObject* obj = HeapObject::FromAddress(old_top);
...
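The new-space fast path above is a plain bump-pointer allocation that reserves one extra word of slack whenever top is misaligned. A standalone sketch of just that address arithmetic, again with illustrative 32-bit constants rather than the real V8 definitions:

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t kPointerSize = 4;  // illustrative 32-bit word
    static const uintptr_t kDoubleAlignmentMask = 8 - 1;

    // Mimics the arithmetic in NewSpace::AllocateRawDoubleAligned: reserve one
    // extra word when top is not double aligned, so the object can start on an
    // aligned address and the spare word becomes a filler.
    struct Bump {
      uintptr_t object;   // double-aligned object start
      uintptr_t new_top;  // updated allocation top
    };

    Bump BumpAllocateDoubleAligned(uintptr_t top, uintptr_t size_in_bytes) {
      uintptr_t alignment_size =
          ((top & kDoubleAlignmentMask) != 0) ? kPointerSize : 0;
      Bump r;
      r.new_top = top + size_in_bytes + alignment_size;
      r.object = top + alignment_size;  // skip the filler word if one was needed
      return r;
    }

    int main() {
      Bump a = BumpAllocateDoubleAligned(0x2004, 16);  // misaligned top
      Bump b = BumpAllocateDoubleAligned(0x2008, 16);  // already aligned top
      printf("object=0x%lx new_top=0x%lx\n", (unsigned long)a.object,
             (unsigned long)a.new_top);  // object=0x2008 new_top=0x2018
      printf("object=0x%lx new_top=0x%lx\n", (unsigned long)b.object,
             (unsigned long)b.new_top);  // object=0x2008 new_top=0x2018
      return 0;
    }

With these entry points on NewSpace and PagedSpace, the GC copy paths in ScavengingVisitor no longer do the over-allocation bookkeeping themselves; they simply pick AllocateRawDoubleAligned or AllocateRaw, which is the point of the commit.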
@@ -1458,19 +1458,23 @@ bool NewSpace::AddFreshPage() {
 }
 
 
-AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
+                                           bool double_aligned) {
   Address old_top = allocation_info_.top();
   Address high = to_space_.page_high();
   if (allocation_info_.limit() < high) {
     // Either the limit has been lowered because linear allocation was disabled
     // or because incremental marking wants to get a chance to do a step. Set
     // the new limit accordingly.
-    Address new_top = old_top + size_in_bytes;
+    int aligned_size = size_in_bytes;
+    aligned_size += (double_aligned ? kPointerSize : 0);
+    Address new_top = old_top + aligned_size;
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
     heap()->incremental_marking()->Step(bytes_allocated,
                                         IncrementalMarking::GC_VIA_STACK_GUARD);
-    UpdateInlineAllocationLimit(size_in_bytes);
+    UpdateInlineAllocationLimit(aligned_size);
     top_on_previous_step_ = new_top;
+    if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
     return AllocateRaw(size_in_bytes);
   } else if (AddFreshPage()) {
     // Switched to new page. Try allocating again.
@@ -1478,6 +1482,7 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
     heap()->incremental_marking()->Step(bytes_allocated,
                                         IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = to_space_.page_low();
+    if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
     return AllocateRaw(size_in_bytes);
   } else {
     return AllocationResult::Retry();
...
@@ -1759,6 +1759,11 @@ class PagedSpace : public Space {
   // failure object if not.
   MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
 
+  // Allocate the requested number of bytes in the space double aligned if
+  // possible, return a failure object if not.
+  MUST_USE_RESULT inline AllocationResult AllocateRawDoubleAligned(
+      int size_in_bytes);
+
   // Give a block of memory to the space's free list. It might be added to
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
@@ -1919,6 +1924,10 @@ class PagedSpace : public Space {
   // address denoted by top in allocation_info_.
   inline HeapObject* AllocateLinearly(int size_in_bytes);
 
+  // Generic fast case allocation function that tries double aligned linear
+  // allocation at the address denoted by top in allocation_info_.
+  inline HeapObject* AllocateLinearlyDoubleAlign(int size_in_bytes);
+
   // If sweeping is still in progress try to sweep unswept pages. If that is
   // not successful, wait for the sweeper threads and re-try free-list
   // allocation.
@@ -2484,6 +2493,9 @@ class NewSpace : public Space {
     return allocation_info_.limit_address();
   }
 
+  MUST_USE_RESULT INLINE(
+      AllocationResult AllocateRawDoubleAligned(int size_in_bytes));
+
   MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
 
   // Reset the allocation pointer to the beginning of the active semispace.
@@ -2601,7 +2613,8 @@ class NewSpace : public Space {
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
 
-  MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT AllocationResult
+      SlowAllocateRaw(int size_in_bytes, bool double_aligned);
 
   friend class SemiSpaceIterator;
...