Commit 7fcbeb28 authored by hpayer, committed by Commit bot

Implement unaligned allocation, and allocate heap numbers double unaligned in the runtime.

In follow-up CLs, the scavenger and the mark-compact (MC) collector should also respect the double-unaligned allocation of heap numbers.
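
For readers skimming the diff, here is a minimal, self-contained sketch of the rule the new kDoubleUnaligned mode enforces. It is illustration, not V8 code, and assumes a 32-bit host where a HeapNumber is a 4-byte map word followed by an 8-byte double, so the payload sits at offset 4 from the object start; to make that payload 8-byte aligned, the object itself must start at an address that is NOT double aligned, and a one-word filler is inserted exactly when the allocation top is already double aligned.

#include <cassert>
#include <cstdint>
#include <cstdio>

using Address = std::uintptr_t;  // stand-in for V8's Address type

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

constexpr Address kPointerSize = 4;          // 32-bit host assumed
constexpr Address kDoubleAlignmentMask = 7;  // 8-byte alignment

// Returns the adjusted object start, skipping one pointer-sized "filler" word
// when the given allocation top does not satisfy the requested alignment.
// This mirrors the shape of Heap::EnsureAligned in the diff below, but works
// on raw addresses instead of HeapObject pointers.
Address AlignAllocation(Address top, AllocationAlignment alignment) {
  bool double_aligned = (top & kDoubleAlignmentMask) == 0;
  if ((alignment == kDoubleAligned && !double_aligned) ||
      (alignment == kDoubleUnaligned && double_aligned)) {
    return top + kPointerSize;  // a one-word filler precedes the object
  }
  return top;
}

int main() {
  // Offset of the double payload inside a HeapNumber on a 32-bit host; the
  // value 4 is assumed here for illustration (compare the STATIC_ASSERT on
  // HeapNumber::kValueOffset added in heap.cc below).
  const Address kValueOffset = 4;

  for (Address top = 0x1000; top < 0x1010; top += kPointerSize) {
    Address object = AlignAllocation(top, kDoubleUnaligned);
    // A double-unaligned object start puts the 8-byte payload on an 8-byte
    // boundary, regardless of where the allocation top started.
    assert(((object + kValueOffset) & kDoubleAlignmentMask) == 0);
    std::printf("top=0x%lx object=0x%lx payload=0x%lx\n",
                static_cast<unsigned long>(top),
                static_cast<unsigned long>(object),
                static_cast<unsigned long>(object + kValueOffset));
  }
  return 0;
}
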

BUG=

Review URL: https://codereview.chromium.org/1141523002

Cr-Commit-Position: refs/heads/master@{#28360}
parent 3bce9c3a
@@ -438,6 +438,7 @@ enum AllocationSpace {
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
......
@@ -157,7 +157,7 @@ AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationSpace retry_space,
Alignment alignment) {
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(gc_state_ == NOT_IN_GC);
@@ -174,10 +174,10 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationResult allocation;
if (NEW_SPACE == space) {
#ifndef V8_HOST_ARCH_64_BIT
if (alignment == kDoubleAligned) {
allocation = new_space_.AllocateRawDoubleAligned(size_in_bytes);
} else {
if (alignment == kWordAligned) {
allocation = new_space_.AllocateRaw(size_in_bytes);
} else {
allocation = new_space_.AllocateRawAligned(size_in_bytes, alignment);
}
#else
allocation = new_space_.AllocateRaw(size_in_bytes);
@@ -194,10 +194,10 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
if (OLD_SPACE == space) {
#ifndef V8_HOST_ARCH_64_BIT
if (alignment == kDoubleAligned) {
allocation = old_space_->AllocateRawDoubleAligned(size_in_bytes);
} else {
if (alignment == kWordAligned) {
allocation = old_space_->AllocateRaw(size_in_bytes);
} else {
allocation = old_space_->AllocateRawAligned(size_in_bytes, alignment);
}
#else
allocation = old_space_->AllocateRaw(size_in_bytes);
......
@@ -1965,10 +1965,20 @@ STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
kDoubleAlignmentMask) == 0); // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
0); // NOLINT
#ifdef V8_HOST_ARCH_32_BIT
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
0); // NOLINT
#endif
HeapObject* Heap::EnsureDoubleAligned(HeapObject* object, int size) {
if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
HeapObject* Heap::EnsureAligned(HeapObject* object, int size,
AllocationAlignment alignment) {
if (alignment == kDoubleAligned &&
(OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(object->address()) & kDoubleAlignmentMask) == 0) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else {
@@ -1978,8 +1988,14 @@ HeapObject* Heap::EnsureDoubleAligned(HeapObject* object, int size) {
}
HeapObject* Heap::PrecedeWithFiller(HeapObject* object) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
}
HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
return EnsureDoubleAligned(object, size);
return EnsureAligned(object, size, kDoubleAligned);
}
@@ -2131,9 +2147,10 @@ class ScavengingVisitor : public StaticVisitorBase {
DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
AllocationResult allocation;
#ifndef V8_HOST_ARCH_64_BIT
#ifdef V8_HOST_ARCH_32_BIT
if (alignment == kDoubleAlignment) {
allocation = heap->new_space()->AllocateRawDoubleAligned(object_size);
allocation =
heap->new_space()->AllocateRawAligned(object_size, kDoubleAligned);
} else {
allocation = heap->new_space()->AllocateRaw(object_size);
}
@@ -2167,9 +2184,10 @@ class ScavengingVisitor : public StaticVisitorBase {
Heap* heap = map->GetHeap();
AllocationResult allocation;
#ifndef V8_HOST_ARCH_64_BIT
#ifdef V8_HOST_ARCH_32_BIT
if (alignment == kDoubleAlignment) {
allocation = heap->old_space()->AllocateRawDoubleAligned(object_size);
allocation =
heap->old_space()->AllocateRawAligned(object_size, kDoubleAligned);
} else {
allocation = heap->old_space()->AllocateRaw(object_size);
}
@@ -2840,7 +2858,8 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
HeapObject* result;
{
AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
AllocationResult allocation =
AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
if (!allocation.To(&result)) return allocation;
}
......
@@ -719,7 +719,10 @@ class Heap {
// This method assumes overallocation of one word. It will store a filler
// before the object if the given object is not double aligned, otherwise
// it will place the filler after the object.
MUST_USE_RESULT HeapObject* EnsureDoubleAligned(HeapObject* object, int size);
MUST_USE_RESULT HeapObject* EnsureAligned(HeapObject* object, int size,
AllocationAlignment alignment);
MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object);
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
@@ -1819,15 +1822,13 @@ class Heap {
HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
enum Alignment { kWordAligned, kDoubleAligned };
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
MUST_USE_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationSpace space, AllocationSpace retry_space,
Alignment aligment = kWordAligned);
AllocationAlignment aligment = kWordAligned);
// Allocates a heap object based on the map.
MUST_USE_RESULT AllocationResult
......
@@ -1942,9 +1942,9 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
}
AllocationResult allocation;
#ifndef V8_HOST_ARCH_64_BIT
#ifdef V8_HOST_ARCH_32_BIT
if (object->NeedsToEnsureDoubleAlignment()) {
allocation = new_space->AllocateRawDoubleAligned(size);
allocation = new_space->AllocateRawAligned(size, kDoubleAligned);
} else {
allocation = new_space->AllocateRaw(size);
}
@@ -1958,9 +1958,9 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
// always room.
UNREACHABLE();
}
#ifndef V8_HOST_ARCH_64_BIT
#ifdef V8_HOST_ARCH_32_BIT
if (object->NeedsToEnsureDoubleAlignment()) {
allocation = new_space->AllocateRawDoubleAligned(size);
allocation = new_space->AllocateRawAligned(size, kDoubleAligned);
} else {
allocation = new_space->AllocateRaw(size);
}
@@ -3120,9 +3120,9 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
HeapObject* target;
AllocationResult allocation;
#ifndef V8_HOST_ARCH_64_BIT
#ifdef V8_HOST_ARCH_32_BIT
if (object->NeedsToEnsureDoubleAlignment()) {
allocation = old_space->AllocateRawDoubleAligned(object_size);
allocation = old_space->AllocateRawAligned(object_size, kDoubleAligned);
} else {
allocation = old_space->AllocateRaw(object_size);
}
......
@@ -250,11 +250,17 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
}
HeapObject* PagedSpace::AllocateLinearlyDoubleAlign(int size_in_bytes) {
HeapObject* PagedSpace::AllocateLinearlyAligned(int size_in_bytes,
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int alignment_size = 0;
if ((OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
if (alignment == kDoubleAligned &&
(OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
alignment_size = kPointerSize;
size_in_bytes += alignment_size;
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(current_top) & kDoubleAlignmentMask) == 0) {
alignment_size = kPointerSize;
size_in_bytes += alignment_size;
}
@@ -262,9 +268,10 @@ HeapObject* PagedSpace::AllocateLinearlyDoubleAlign(int size_in_bytes) {
if (new_top > allocation_info_.limit()) return NULL;
allocation_info_.set_top(new_top);
if (alignment_size > 0)
return heap()->EnsureDoubleAligned(HeapObject::FromAddress(current_top),
size_in_bytes);
if (alignment_size > 0) {
return heap()->EnsureAligned(HeapObject::FromAddress(current_top),
size_in_bytes, alignment);
}
return HeapObject::FromAddress(current_top);
}
@@ -293,9 +300,10 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
// Raw allocation.
AllocationResult PagedSpace::AllocateRawDoubleAligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
HeapObject* object = AllocateLinearlyDoubleAlign(size_in_bytes);
HeapObject* object = AllocateLinearlyAligned(size_in_bytes, alignment);
int aligned_size_in_bytes = size_in_bytes + kPointerSize;
if (object == NULL) {
@@ -304,7 +312,7 @@ AllocationResult PagedSpace::AllocateRawDoubleAligned(int size_in_bytes) {
object = SlowAllocateRaw(aligned_size_in_bytes);
}
if (object != NULL) {
object = heap()->EnsureDoubleAligned(object, aligned_size_in_bytes);
object = heap()->EnsureAligned(object, aligned_size_in_bytes, alignment);
}
}
@@ -321,20 +329,25 @@ AllocationResult PagedSpace::AllocateRawDoubleAligned(int size_in_bytes) {
// NewSpace
AllocationResult NewSpace::AllocateRawDoubleAligned(int size_in_bytes) {
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
int alignment_size = 0;
int aligned_size_in_bytes = 0;
// If double alignment is required and top pointer is not aligned, we allocate
// additional memory to take care of the alignment.
if ((OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
if (alignment == kDoubleAligned &&
(OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
alignment_size += kPointerSize;
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(old_top) & kDoubleAlignmentMask) == 0) {
alignment_size += kPointerSize;
}
aligned_size_in_bytes = size_in_bytes + alignment_size;
if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
return SlowAllocateRaw(size_in_bytes, true);
return SlowAllocateRaw(size_in_bytes, alignment);
}
HeapObject* obj = HeapObject::FromAddress(old_top);
@@ -342,12 +355,15 @@ AllocationResult NewSpace::AllocateRawDoubleAligned(int size_in_bytes) {
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (alignment_size > 0) {
obj = heap()->EnsureDoubleAligned(obj, aligned_size_in_bytes);
obj = heap()->PrecedeWithFiller(obj);
}
// The slow path above ultimately goes through AllocateRaw, so this suffices.
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
DCHECK((kDoubleAligned && (OffsetFrom(obj) & kDoubleAlignmentMask) == 0) ||
(kDoubleUnaligned && (OffsetFrom(obj) & kDoubleAlignmentMask) != 0));
return obj;
}
@@ -356,7 +372,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top();
if (allocation_info_.limit() - old_top < size_in_bytes) {
return SlowAllocateRaw(size_in_bytes, false);
return SlowAllocateRaw(size_in_bytes, kWordAligned);
}
HeapObject* obj = HeapObject::FromAddress(old_top);
......
@@ -1459,7 +1459,7 @@ bool NewSpace::AddFreshPage() {
AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
bool double_aligned) {
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
@@ -1467,14 +1467,17 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
// or because incremental marking wants to get a chance to do a step. Set
// the new limit accordingly.
int aligned_size = size_in_bytes;
aligned_size += (double_aligned ? kPointerSize : 0);
aligned_size += (alignment != kWordAligned) ? kPointerSize : 0;
Address new_top = old_top + aligned_size;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
UpdateInlineAllocationLimit(aligned_size);
top_on_previous_step_ = new_top;
if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
if (alignment == kDoubleAligned)
return AllocateRawAligned(size_in_bytes, kDoubleAligned);
else if (alignment == kDoubleUnaligned)
return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
@@ -1482,7 +1485,10 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
if (alignment == kDoubleAligned)
return AllocateRawAligned(size_in_bytes, kDoubleAligned);
else if (alignment == kDoubleUnaligned)
return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
return AllocateRaw(size_in_bytes);
} else {
return AllocationResult::Retry();
......
@@ -1768,8 +1768,8 @@ class PagedSpace : public Space {
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
MUST_USE_RESULT inline AllocationResult AllocateRawDoubleAligned(
int size_in_bytes);
MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
@@ -1933,7 +1933,8 @@ class PagedSpace : public Space {
// Generic fast case allocation function that tries double aligned linear
// allocation at the address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearlyDoubleAlign(int size_in_bytes);
inline HeapObject* AllocateLinearlyAligned(int size_in_bytes,
AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
@@ -2500,8 +2501,8 @@ class NewSpace : public Space {
return allocation_info_.limit_address();
}
MUST_USE_RESULT INLINE(
AllocationResult AllocateRawDoubleAligned(int size_in_bytes));
MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment));
MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
@@ -2621,7 +2622,7 @@ class NewSpace : public Space {
HistogramInfo* promoted_histogram_;
MUST_USE_RESULT AllocationResult
SlowAllocateRaw(int size_in_bytes, bool double_aligned);
SlowAllocateRaw(int size_in_bytes, AllocationAlignment alignment);
friend class SemiSpaceIterator;
......
@@ -2812,13 +2812,22 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
bool HeapObject::NeedsToEnsureDoubleAlignment() {
#ifndef V8_HOST_ARCH_64_BIT
#ifdef V8_HOST_ARCH_32_BIT
return (IsFixedFloat64Array() || IsFixedDoubleArray() ||
IsConstantPoolArray()) &&
FixedArrayBase::cast(this)->length() != 0;
#else
return false;
#endif // V8_HOST_ARCH_64_BIT
#endif // V8_HOST_ARCH_32_BIT
}
bool HeapObject::NeedsToEnsureDoubleUnalignment() {
#ifdef V8_HOST_ARCH_32_BIT
return IsHeapNumber();
#else
return false;
#endif // V8_HOST_ARCH_32_BIT
}
......
@@ -1481,6 +1481,7 @@ class HeapObject: public Object {
#endif
inline bool NeedsToEnsureDoubleAlignment();
inline bool NeedsToEnsureDoubleUnalignment();
// Layout description.
// First field in a heap object is map.
......
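
To close, a hypothetical sketch (not part of this CL) of how a collector could map the two predicates above onto the new AllocationAlignment values when evacuating objects, as the commit message anticipates for the scavenger and mark-compact collector in follow-up CLs. The ObjectTraits struct and AlignmentFor helper are stand-ins invented here for illustration, not V8 API.

#include <cstdio>

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

// Stand-in for the two HeapObject predicates added in objects-inl.h above.
struct ObjectTraits {
  bool needs_double_alignment;    // e.g. FixedDoubleArray on 32-bit hosts
  bool needs_double_unalignment;  // e.g. HeapNumber on 32-bit hosts
};

// Picks the alignment an evacuating collector would request from an
// AllocateRawAligned-style allocator for a given object.
AllocationAlignment AlignmentFor(const ObjectTraits& object) {
  if (object.needs_double_alignment) return kDoubleAligned;
  if (object.needs_double_unalignment) return kDoubleUnaligned;
  return kWordAligned;
}

int main() {
  const ObjectTraits heap_number = {false, true};
  const ObjectTraits double_array = {true, false};
  const ObjectTraits plain_object = {false, false};
  std::printf("HeapNumber: %d, FixedDoubleArray: %d, other: %d\n",
              AlignmentFor(heap_number), AlignmentFor(double_array),
              AlignmentFor(plain_object));
  return 0;
}
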