Commit a4872611 authored by Maya Lekova, committed by Commit Bot

Revert "[heap] Introduce fast path in PagedSpace::AllocateRaw"

This reverts commit 099b1cf3.

Reason for revert: Fails on MSAN - https://ci.chromium.org/p/v8/builders/ci/V8%20Clusterfuzz%20Linux%20MSAN%20no%20origins/16034?

Original change's description:
> [heap] Introduce fast path in PagedSpace::AllocateRaw
> 
> Introduce explicit fast path for allocation in PagedSpace. The slow path
> is moved into AllocateRawSlow which refills the LAB and retries the
> allocation.
> 
> Bug: v8:10315
> Change-Id: I807e56ccde833b0e9a42166b4883c1978d01e828
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2277808
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#68653}

TBR=ulan@chromium.org,dinfuehr@chromium.org

Change-Id: I11ce25da69a24a0a5ccc5e0324d1dfe71cdeaca0
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:10315
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2275969
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68654}
parent 099b1cf3
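The reverted change's description above is the usual bump-pointer split: a fast path that only bumps the top of the current linear allocation buffer (LAB), and a slow path (AllocateRawSlow) that refills the LAB and retries. Below is a minimal, self-contained sketch of that pattern for illustration only; Space, LinearAllocationArea, and RefillLab are made-up stand-ins, not the real V8 classes.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative LAB: a [top, limit) range that allocation bumps through.
struct LinearAllocationArea {
  uintptr_t top = 0;
  uintptr_t limit = 0;
};

class Space {
 public:
  // Fast path: bump the top pointer if the object fits in the current LAB.
  uintptr_t AllocateRaw(size_t size_in_bytes) {
    uintptr_t new_top = lab_.top + size_in_bytes;
    if (new_top <= lab_.limit) {
      uintptr_t object = lab_.top;
      lab_.top = new_top;
      return object;
    }
    return AllocateRawSlow(size_in_bytes);
  }

 private:
  // Slow path: refill the LAB (here a trivial stand-in) and retry.
  uintptr_t AllocateRawSlow(size_t size_in_bytes) {
    if (!RefillLab(size_in_bytes)) return 0;  // allocation failure
    return AllocateRaw(size_in_bytes);
  }

  bool RefillLab(size_t size_in_bytes) {
    // Real code takes memory from a free list or a new page.
    static uint8_t backing[1 << 20];
    lab_.top = reinterpret_cast<uintptr_t>(backing);
    lab_.limit = lab_.top + sizeof(backing);
    return size_in_bytes <= sizeof(backing);
  }

  LinearAllocationArea lab_;
};

int main() {
  Space space;
  printf("allocated at %p\n", reinterpret_cast<void*>(space.AllocateRaw(64)));
  return 0;
}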
src/heap/paged-spaces-inl.h
@@ -5,7 +5,6 @@
#ifndef V8_HEAP_PAGED_SPACES_INL_H_
#define V8_HEAP_PAGED_SPACES_INL_H_
#include "src/common/globals.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/paged-spaces.h"
#include "src/objects/code-inl.h"
@@ -97,35 +96,31 @@ bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
AllocationResult PagedSpace::AllocateLinearly(int size_in_bytes) {
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
if (new_top > allocation_info_.limit())
return AllocationResult::Retry(identity());
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return AllocationResult(HeapObject::FromAddress(current_top));
return HeapObject::FromAddress(current_top);
}
AllocationResult PagedSpace::TryAllocateLinearlyAligned(
HeapObject PagedSpace::TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit())
return AllocationResult::Retry(identity());
if (new_top > allocation_info_.limit()) return HeapObject();
allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
HeapObject object = Heap::PrecedeWithFiller(
ReadOnlyRoots(heap()), HeapObject::FromAddress(current_top),
filler_size);
return AllocationResult(object);
return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top),
filler_size);
}
return AllocationResult(HeapObject::FromAddress(current_top));
return HeapObject::FromAddress(current_top);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
@@ -133,16 +128,15 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
AllocationResult result = AllocateLinearly(size_in_bytes);
DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return result;
return object;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
@@ -150,44 +144,62 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK_EQ(identity(), OLD_SPACE);
int allocation_size = size_in_bytes;
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!object.is_null());
}
allocation_size = size_in_bytes;
AllocationResult result =
TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return result;
return object;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
AllocationResult result;
if (alignment != kWordAligned) {
int allocation_size = size_in_bytes;
result = TryAllocateLinearlyAligned(&allocation_size, alignment);
} else {
result = AllocateLinearly(size_in_bytes);
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
if (!result.IsRetry()) {
return result;
} else {
return AllocateRawSlow(size_in_bytes, alignment, origin);
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
} // namespace internal
......
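The AllocateRawAligned path above reserves worst-case filler before the final object address is known (Heap::GetMaximumFillToAlign), then computes the actual filler once the top is fixed (Heap::GetFillToAlign). A rough sketch of that arithmetic follows, assuming made-up constants for a 32-bit host; kTaggedSize, kDoubleAlignment, FillToAlign, and kMaximumFillToAlign are illustrative names, not the V8 definitions.

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kTaggedSize = 4;       // one tagged slot on a 32-bit host
constexpr uintptr_t kDoubleAlignment = 8;  // alignment required for doubles

// Filler bytes needed so an object placed at `top` starts 8-byte aligned.
uintptr_t FillToAlign(uintptr_t top) {
  return (kDoubleAlignment - top % kDoubleAlignment) % kDoubleAlignment;
}

// Upper bound used before the final top is known: the top is always at least
// tagged-size aligned, so at most one tagged slot of filler is needed.
constexpr uintptr_t kMaximumFillToAlign = kDoubleAlignment - kTaggedSize;

int main() {
  uintptr_t top = 0x1004;  // tagged-aligned but not double-aligned
  uintptr_t size = 16;
  uintptr_t reserved = size + kMaximumFillToAlign;  // worst case, reserved up front
  uintptr_t filler = FillToAlign(top);              // actual filler once top is fixed
  printf("reserve %llu bytes, actual filler %llu bytes\n",
         static_cast<unsigned long long>(reserved),
         static_cast<unsigned long long>(filler));
  return 0;
}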
src/heap/paged-spaces.cc
@@ -1004,43 +1004,6 @@ bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
return false;
}
AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
// -----------------------------------------------------------------------------
// MapSpace implementation
......
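The removed AllocateRawSlow (and the restored AllocateRaw in the inl.h) also does allocation-step bookkeeping: it computes the bytes bump-allocated since the previous step and passes them to AllocationStep() so registered allocation observers get notified. The following is a loose, illustrative model of that mechanism, not the real V8 classes; in particular, real observers steer the allocation limit so the slow path is taken when a step is due, rather than being checked on every call as done here.

#include <cstdint>
#include <cstdio>
#include <vector>

class AllocationObserver {
 public:
  explicit AllocationObserver(size_t step_size) : step_size_(step_size) {}
  virtual ~AllocationObserver() = default;
  // Called with the bytes allocated since the last notification, the address
  // of the object about to be initialized, and its size.
  virtual void Step(size_t bytes_allocated, uintptr_t soon_object,
                    size_t object_size) = 0;
  size_t step_size() const { return step_size_; }

 private:
  size_t step_size_;
};

class Space {
 public:
  void AddAllocationObserver(AllocationObserver* observer) {
    observers_.push_back(observer);
  }

  // Mirrors the call in the removed slow path: `bytes_allocated` is the new
  // object's size plus whatever was bump-allocated since the previous step.
  void AllocationStep(size_t bytes_allocated, uintptr_t soon_object,
                      size_t object_size) {
    for (AllocationObserver* observer : observers_) {
      if (bytes_allocated >= observer->step_size()) {
        observer->Step(bytes_allocated, soon_object, object_size);
      }
    }
  }

 private:
  std::vector<AllocationObserver*> observers_;
};

// Example observer: report whenever at least 1 KiB has been allocated.
class PrintingObserver : public AllocationObserver {
 public:
  PrintingObserver() : AllocationObserver(1024) {}
  void Step(size_t bytes_allocated, uintptr_t soon_object,
            size_t object_size) override {
    printf("step: %zu bytes since last, %zu-byte object at %p\n",
           bytes_allocated, object_size,
           reinterpret_cast<void*>(soon_object));
  }
};

int main() {
  Space space;
  PrintingObserver observer;
  space.AddAllocationObserver(&observer);
  space.AllocationStep(2048, 0x1000, 64);
  return 0;
}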
src/heap/paged-spaces.h
@@ -327,11 +327,6 @@
return identity() == OLD_SPACE && !is_local_space();
}
// Slow path of allocation function
V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -357,13 +352,13 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline AllocationResult AllocateLinearly(int size_in_bytes);
inline HeapObject AllocateLinearly(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
// (object size + alignment filler size) to the size_in_bytes.
inline AllocationResult TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment);
inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes, AllocationOrigin origin);
......
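For context on the revert reason: both allocation paths in the diff call MSAN_ALLOCATED_UNINITIALIZED_MEMORY right after bumping the top pointer, which tells MemorySanitizer that the returned region is uninitialized again. The sketch below is a simplified illustration of what such an annotation boils down to; the guard and macro name are made up, only __msan_allocated_memory is the real MSan interface call.

#include <cstddef>

#if defined(MEMORY_SANITIZER)
#include <sanitizer/msan_interface.h>
// Re-poison a freshly allocated region so MSan reports any read that happens
// before the object's fields are written.
#define MARK_UNINITIALIZED(start, size) \
  __msan_allocated_memory(reinterpret_cast<const void*>(start), (size))
#else
#define MARK_UNINITIALIZED(start, size) ((void)(start), (void)(size))
#endif

int main() {
  char buffer[32];
  MARK_UNINITIALIZED(buffer, sizeof(buffer));
  return 0;
}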