Commit ed976c91 authored by Wenyu Zhao, committed by Commit Bot

[heap] Add DCHECKs for third party heap

Place DCHECKs in per-space allocation functions and concurrent allocation functions
to assert TPH is not enabled.

For the per-space allocation functions, they should be redirected to TPH.

For concurrent allocations, TPH doesn't support them at the moment.

Bug: v8:11641
Change-Id: Ib7292bc03132353b0555d9cbd5b49936f4a6de76
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2849817
Commit-Queue: Wenyu Zhao <wenyu.zhao@anu.edu.au>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74211}
parent 3e3d34ca
......@@ -20,6 +20,7 @@ namespace internal {
AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
// TODO(dinfuehr): Add support for allocation observers
#ifdef DEBUG
local_heap_->VerifyCurrent();
......
......@@ -130,6 +130,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
DCHECK(!FLAG_enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
......@@ -160,6 +161,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size) {
DCHECK(!FLAG_enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
......@@ -446,6 +448,7 @@ NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
DCHECK(!FLAG_enable_third_party_heap);
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
......@@ -532,6 +535,7 @@ CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
chunk_map_(kInitialChunkMapCapacity) {}
AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
DCHECK(!FLAG_enable_third_party_heap);
return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}
......
......@@ -18,6 +18,7 @@ namespace internal {
AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
DCHECK(!FLAG_enable_third_party_heap);
#if DEBUG
VerifyCurrent();
DCHECK(AllowHandleAllocation::IsAllowed());
......@@ -45,6 +46,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
if (!result.IsRetry()) return result.ToObject().address();
return PerformCollectionAndAllocateAgain(object_size, type, origin,
......
......@@ -88,6 +88,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_single_generation);
DCHECK(!FLAG_enable_third_party_heap);
#if DEBUG
VerifyTop();
#endif
......
......@@ -628,6 +628,7 @@ AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
return AllocationResult::Retry();
}
......@@ -646,6 +647,7 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, alignment)) {
return AllocationResult::Retry();
}
......
......@@ -131,6 +131,7 @@ AllocationResult PagedSpace::AllocateFastAligned(
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureLabMain(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
......@@ -153,6 +154,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
DCHECK_EQ(identity(), OLD_SPACE);
int allocation_size = size_in_bytes;
// We don't know exactly how much filler we need to align until space is
......@@ -182,6 +184,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result;
if (alignment != kWordAligned) {
......
......@@ -635,6 +635,7 @@ HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
AllocationResult ReadOnlySpace::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
DCHECK(!FLAG_enable_third_party_heap);
DCHECK(!IsDetached());
int allocation_size = size_in_bytes;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment