Commit aab3f045 authored by Steve Blackburn, committed by Commit Bot

Generalize allocation types in TPH API.

The TPH needs to support the idea of 'code' objects, and the ability to query whether an object is of this type.

Bug: v8:9533
Change-Id: Ic36b235bd9640aa675a3ef0d8c3f6c98dd8cd862
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2013116
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Steve Blackburn <steveblackburn@google.com>
Cr-Commit-Position: refs/heads/master@{#65948}
parent ceca93b8
......@@ -7,6 +7,7 @@
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/third-party/heap-api.h"
#include "src/objects/objects.h"
namespace v8 {
......@@ -30,7 +31,10 @@ class V8_EXPORT_PRIVATE CombinedHeapObjectIterator final {
// Returns true iff |object| is a valid heap object: when a third-party heap
// (TPH) is enabled, validity is delegated to the TPH implementation;
// otherwise the object must reside in the read-only heap or in |heap|.
//
// NOTE: the previous revision's unconditional `return` line (left over from
// the diff merge) made the TPH branch unreachable; it has been removed.
V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
                                                    HeapObject object) {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
    return third_party_heap::Heap::IsValidHeapObject(object);
  else
    return ReadOnlyHeap::Contains(object) || heap->Contains(object);
}
} // namespace internal
......
......@@ -183,48 +183,47 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
type = AllocationType::kOld;
}
if (AllocationType::kYoung == type) {
if (large_object) {
if (FLAG_young_generation_large_objects) {
allocation = new_lo_space_->AllocateRaw(size_in_bytes);
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
allocation = tp_heap_->Allocate(size_in_bytes, type, alignment);
} else {
if (AllocationType::kYoung == type) {
if (large_object) {
if (FLAG_young_generation_large_objects) {
allocation = new_lo_space_->AllocateRaw(size_in_bytes);
} else {
// If young generation large objects are disabled we have to tenure
// the allocation and violate the given allocation type. This could be
// dangerous. We may want to remove
// FLAG_young_generation_large_objects and avoid patching.
allocation = lo_space_->AllocateRaw(size_in_bytes);
}
} else {
// If young generation large objects are disabled we have to tenure the
// allocation and violate the given allocation type. This could be
// dangerous. We may want to remove FLAG_young_generation_large_objects
// and avoid patching.
allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kOld == type) {
if (large_object) {
allocation = lo_space_->AllocateRaw(size_in_bytes);
} else {
allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kCode == type) {
if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
allocation = code_lo_space_->AllocateRaw(size_in_bytes);
}
} else if (AllocationType::kMap == type) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (AllocationType::kReadOnly == type) {
DCHECK(isolate_->serializer_enabled());
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
allocation =
read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
} else {
allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kOld == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
if (large_object) {
allocation = lo_space_->AllocateRaw(size_in_bytes);
} else {
allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kCode == type) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
allocation = tp_heap_->AllocateCode(size_in_bytes, alignment);
} else if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
allocation = code_lo_space_->AllocateRaw(size_in_bytes);
UNREACHABLE();
}
} else if (AllocationType::kMap == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (AllocationType::kReadOnly == type &&
!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
DCHECK(isolate_->serializer_enabled());
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
allocation =
read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
} else if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
allocation = tp_heap_->Allocate(size_in_bytes, alignment);
} else {
UNREACHABLE();
}
if (allocation.To(&object)) {
......@@ -249,6 +248,11 @@ template <Heap::AllocationRetryMode mode>
HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
AllocationOrigin origin,
AllocationAlignment alignment) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
AllocationResult result = AllocateRaw(size, allocation, origin, alignment);
DCHECK(!result.IsRetry());
return result.ToObjectChecked();
}
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_EQ(gc_state_, NOT_IN_GC);
......
......@@ -5528,7 +5528,9 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
// Find known WeakArrayLists and compact them.
Handle<WeakArrayList> scripts(script_list(), isolate());
DCHECK_IMPLIES(allocation == AllocationType::kOld, InOldSpace(*scripts));
DCHECK_IMPLIES(
!V8_ENABLE_THIRD_PARTY_HEAP_BOOL && allocation == AllocationType::kOld,
InOldSpace(*scripts));
scripts = CompactWeakArrayList(this, scripts, allocation);
set_script_list(*scripts);
}
......
......@@ -143,6 +143,7 @@ bool ReadOnlyHeap::Contains(Address address) {
// static
// Returns whether |object| lives in the read-only space. Not supported when
// a third-party heap is in use (the DCHECK guards against that).
bool ReadOnlyHeap::Contains(HeapObject object) {
  DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
  return chunk->InReadOnlySpace();
}
......
......@@ -18,13 +18,15 @@ class Heap {
// Returns the isolate owning the heap that contains |address|.
static v8::Isolate* GetIsolate(Address address);
// NOTE(review): the diff shows this untyped overload being superseded by the
// AllocationType-taking overload below — confirm whether it should remain.
AllocationResult Allocate(size_t size_in_bytes, AllocationAlignment align);
// Allocates a 'code' object (see commit description) of |size_in_bytes|.
AllocationResult AllocateCode(size_t size_in_bytes,
AllocationAlignment align);
// Generalized allocation entry point taking an explicit AllocationType.
AllocationResult Allocate(size_t size_in_bytes, AllocationType type,
AllocationAlignment align);
// Maps an interior pointer to an object-related address in the TPH.
Address GetObjectFromInnerPointer(Address inner_pointer);
// Returns whether |address| lies in the third-party heap's code space.
static bool InCodeSpace(Address address);
// Returns whether |object| is a valid object managed by the TPH.
static bool IsValidHeapObject(HeapObject object);
// Requests a garbage collection; return-value semantics not visible here.
bool CollectGarbage();
};
......
......@@ -554,25 +554,42 @@ void Serializer::ObjectSerializer::Serialize() {
SerializeObject();
}
void Serializer::ObjectSerializer::SerializeObject() {
int size = object_.Size();
Map map = object_.map();
SnapshotSpace space;
if (ReadOnlyHeap::Contains(object_)) {
space = SnapshotSpace::kReadOnlyHeap;
namespace {
SnapshotSpace GetSnapshotSpace(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
if (third_party_heap::Heap::InCodeSpace(object.address())) {
return SnapshotSpace::kCode;
} else if (ReadOnlyHeap::Contains(object)) {
return SnapshotSpace::kReadOnlyHeap;
} else if (object.Size() > kMaxRegularHeapObjectSize) {
return SnapshotSpace::kLargeObject;
} else if (object.IsMap()) {
return SnapshotSpace::kMap;
} else {
return SnapshotSpace::kNew; // avoid new/young distinction in TPH
}
} else if (ReadOnlyHeap::Contains(object)) {
return SnapshotSpace::kReadOnlyHeap;
} else {
AllocationSpace heap_space =
MemoryChunk::FromHeapObject(object_)->owner_identity();
MemoryChunk::FromHeapObject(object)->owner_identity();
// Large code objects are not supported and cannot be expressed by
// SnapshotSpace.
DCHECK_NE(heap_space, CODE_LO_SPACE);
// Young generation large objects are tenured.
if (heap_space == NEW_LO_SPACE) {
space = SnapshotSpace::kLargeObject;
return SnapshotSpace::kLargeObject;
} else {
space = static_cast<SnapshotSpace>(heap_space);
return static_cast<SnapshotSpace>(heap_space);
}
}
}
} // namespace
void Serializer::ObjectSerializer::SerializeObject() {
int size = object_.Size();
Map map = object_.map();
SnapshotSpace space = GetSnapshotSpace(object_);
SerializePrologue(space, size, map);
// Serialize the rest of the object.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment