Commit 8a437788 authored by Maciej Goszczycki, committed by Commit Bot

[roheap] Set owner_ to nullptr for read-only chunks

As it stands, most of the ReadOnlySpace class's methods are unusable once it
has been sealed, since all of its pages are read-only. Set owner_ to null
to ensure nothing uses it unintentionally.

This also helps with separating the ReadOnlySpace from the Heap class in
the future as ReadOnlySpace might not inherit from Space.

Bug: v8:7464
Change-Id: I3b24f20c644d6f5e23647bc1de4d256a20a0eb19
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1637800
Reviewed-by: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
Cr-Commit-Position: refs/heads/master@{#61979}
parent 780c8699
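The heart of the change, visible throughout the diff below: identity checks no longer dereference a chunk's owner_ (which becomes null once a read-only chunk is sealed and detached) and instead consult the new READ_ONLY_HEAP page flag via InReadOnlySpace(), IsWritable() and owner_identity(). The standalone sketch below models that pattern only; Chunk, Space and their members here are simplified illustrative stand-ins, not the real V8 declarations.

    #include <cassert>
    #include <cstdint>

    // Simplified stand-ins for the V8 types; only the owner_/flag interplay
    // introduced by this commit is modelled.
    enum AllocationSpace { RO_SPACE, OLD_SPACE };

    struct Space {
      AllocationSpace id;
      AllocationSpace identity() const { return id; }
    };

    struct Chunk {
      static constexpr uintptr_t kReadOnlyHeapFlag = uintptr_t{1} << 21;

      uintptr_t flags = 0;
      Space* owner = nullptr;  // null once a read-only chunk is sealed/detached
      void* heap = nullptr;    // also null after sealing

      bool InReadOnlySpace() const { return flags & kReadOnlyHeapFlag; }

      // Mirrors the idea behind MemoryChunk::IsWritable(): a read-only chunk is
      // still writable while it is attached to a heap (during bootstrapping).
      bool IsWritable() const { return !InReadOnlySpace() || heap != nullptr; }

      // Mirrors the idea behind MemoryChunk::owner_identity(): never touch
      // owner for read-only chunks, since it may be null after sealing.
      AllocationSpace owner_identity() const {
        if (InReadOnlySpace()) return RO_SPACE;
        return owner->identity();
      }
    };

    int main() {
      Space old_space{OLD_SPACE};
      Chunk normal{0, &old_space, &old_space};
      assert(normal.owner_identity() == OLD_SPACE);

      Chunk sealed_ro{Chunk::kReadOnlyHeapFlag, nullptr, nullptr};
      assert(sealed_ro.owner_identity() == RO_SPACE);  // no owner dereference
      assert(!sealed_ro.IsWritable());
      return 0;
    }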
@@ -263,15 +263,13 @@ void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
 }
 
 bool Heap::CanAllocateInReadOnlySpace() {
-  return !deserialization_complete_ &&
-         (isolate()->serializer_enabled() ||
-          !isolate()->initialized_from_snapshot());
+  return read_only_space()->writable();
 }
 
 void Heap::UpdateAllocationsHash(HeapObject object) {
   Address object_address = object.address();
   MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
-  AllocationSpace allocation_space = memory_chunk->owner()->identity();
+  AllocationSpace allocation_space = memory_chunk->owner_identity();
   STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
 
   uint32_t value =
@@ -379,8 +377,7 @@ Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
   // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
   // find a heap. The exception is when the ReadOnlySpace is writeable, during
   // bootstrapping, so explicitly allow this case.
-  SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
-              static_cast<ReadOnlySpace*>(chunk->owner())->writable());
+  SLOW_DCHECK(chunk->IsWritable());
   Heap* heap = chunk->heap();
   SLOW_DCHECK(heap != nullptr);
   return heap;
@@ -614,8 +611,8 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
       scope_active_(chunk_->heap()->write_protect_code_memory() &&
                     chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
   if (scope_active_) {
-    DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
-           (chunk_->owner()->identity() == CODE_LO_SPACE));
+    DCHECK(chunk_->owner_identity() == CODE_SPACE ||
+           (chunk_->owner_identity() == CODE_LO_SPACE));
     chunk_->SetReadAndWritable();
   }
 }
...
@@ -46,23 +46,14 @@ V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
 // internals are only intended to shortcut write barrier checks.
 namespace heap_internals {
 
-struct Space {
-  static constexpr uintptr_t kIdOffset = 9 * kSystemPointerSize;
-  V8_INLINE AllocationSpace identity() {
-    return *reinterpret_cast<AllocationSpace*>(reinterpret_cast<Address>(this) +
-                                               kIdOffset);
-  }
-};
-
 struct MemoryChunk {
   static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
   static constexpr uintptr_t kHeapOffset =
       kFlagsOffset + kUIntptrSize + 4 * kSystemPointerSize;
-  static constexpr uintptr_t kOwnerOffset =
-      kHeapOffset + 2 * kSystemPointerSize;
   static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
   static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
   static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
+  static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;
 
   V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
       HeapObject object) {
@@ -84,13 +75,12 @@ struct MemoryChunk {
   V8_INLINE Heap* GetHeap() {
     Heap* heap = *reinterpret_cast<Heap**>(reinterpret_cast<Address>(this) +
                                            kHeapOffset);
-    SLOW_DCHECK(heap != nullptr);
+    DCHECK_NOT_NULL(heap);
     return heap;
   }
 
-  V8_INLINE Space* GetOwner() {
-    return *reinterpret_cast<Space**>(reinterpret_cast<Address>(this) +
-                                      kOwnerOffset);
+  V8_INLINE bool InReadOnlySpace() const {
+    return GetFlags() & kReadOnlySpaceBit;
   }
 };
@@ -246,7 +236,7 @@ inline Heap* GetHeapFromWritableObject(const HeapObject object) {
 inline bool GetIsolateFromWritableObject(HeapObject obj, Isolate** isolate) {
   heap_internals::MemoryChunk* chunk =
       heap_internals::MemoryChunk::FromHeapObject(obj);
-  if (chunk->GetOwner()->identity() == RO_SPACE) {
+  if (chunk->InReadOnlySpace()) {
     *isolate = nullptr;
     return false;
   }
...
@@ -3718,6 +3718,9 @@ const char* Heap::GarbageCollectionReasonToString(
 }
 
 bool Heap::Contains(HeapObject value) {
+  if (ReadOnlyHeap::Contains(value)) {
+    return false;
+  }
   if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
     return false;
   }
@@ -5323,7 +5326,7 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
   DCHECK(!IsLargeObject(object));
   Page* page = Page::FromAddress(slot.address());
   if (!page->InYoungGeneration()) {
-    DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+    DCHECK_EQ(page->owner_identity(), OLD_SPACE);
     store_buffer()->DeleteEntry(slot.address());
   }
 }
@@ -5333,7 +5336,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
   DCHECK(!IsLargeObject(object));
   if (InYoungGeneration(object)) return;
   Page* page = Page::FromAddress(slot.address());
-  DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+  DCHECK_EQ(page->owner_identity(), OLD_SPACE);
   store_buffer()->MoveAllEntriesToRememberedSet();
   CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
   // Old to old slots are filtered with invalidated slots.
@@ -5346,7 +5349,7 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
   Page* page = Page::FromAddress(start);
   DCHECK(!page->IsLargePage());
   if (!page->InYoungGeneration()) {
-    DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+    DCHECK_EQ(page->owner_identity(), OLD_SPACE);
     store_buffer()->DeleteEntry(start, end);
   }
 }
@@ -5856,7 +5859,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
   if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
   InstanceType type = map.instance_type();
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
-  AllocationSpace src = chunk->owner()->identity();
+  AllocationSpace src = chunk->owner_identity();
   switch (src) {
     case NEW_SPACE:
       return dst == NEW_SPACE || dst == OLD_SPACE;
@@ -6145,16 +6148,16 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
   CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
            slim_chunk->IsMarking());
 
-  Space* chunk_owner = chunk->owner();
-  AllocationSpace identity = chunk_owner->identity();
+  AllocationSpace identity = chunk->owner_identity();
 
   // Generation consistency.
   CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
            slim_chunk->InYoungGeneration());
+  // Read-only consistency.
+  CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
 
   // Marking consistency.
-  if (identity != RO_SPACE ||
-      static_cast<ReadOnlySpace*>(chunk->owner())->writable()) {
+  if (chunk->IsWritable()) {
     // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
     // find a heap. The exception is when the ReadOnlySpace is writeable, during
     // bootstrapping, so explicitly allow this case.
@@ -6182,9 +6185,6 @@ static_assert(MemoryChunk::kFlagsOffset ==
 static_assert(MemoryChunk::kHeapOffset ==
                   heap_internals::MemoryChunk::kHeapOffset,
               "Heap offset inconsistent");
-static_assert(MemoryChunk::kOwnerOffset ==
-                  heap_internals::MemoryChunk::kOwnerOffset,
-              "Owner offset inconsistent");
 
 void Heap::SetEmbedderStackStateForNextFinalizaton(
     EmbedderHeapTracer::EmbedderStackState stack_state) {
...
@@ -990,8 +990,9 @@ class Heap {
   // Returns whether the object resides in old space.
   inline bool InOldSpace(Object object);
 
-  // Checks whether an address/object in the heap (including auxiliary
-  // area and unused area).
+  // Checks whether an address/object is in the non-read-only heap (including
+  // auxiliary area and unused area). Use IsValidHeapObject if checking both
+  // heaps is required.
   V8_EXPORT_PRIVATE bool Contains(HeapObject value);
 
   // Checks whether an address/object in a space.
...
@@ -1463,7 +1463,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
   inline bool Visit(HeapObject object, int size) override {
     HeapObject target_object;
-    if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
+    if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
                           object, size, &target_object)) {
       DCHECK(object.map_word().IsForwardingAddress());
       return true;
@@ -3183,7 +3183,7 @@ void MarkCompactCollector::Evacuate() {
         sweeper()->AddPageForIterability(p);
       } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
-        DCHECK_EQ(OLD_SPACE, p->owner()->identity());
+        DCHECK_EQ(OLD_SPACE, p->owner_identity());
         sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
       }
     }
@@ -3191,7 +3191,7 @@ void MarkCompactCollector::Evacuate() {
     for (Page* p : old_space_evacuation_pages_) {
       if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-        sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
+        sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
       }
     }
...
@@ -108,7 +108,7 @@ void ReadOnlyHeap::ClearSharedHeapForTest() {
 
 // static
 bool ReadOnlyHeap::Contains(HeapObject object) {
-  return Page::FromHeapObject(object)->owner()->identity() == RO_SPACE;
+  return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
 }
 
 // static
...
@@ -215,7 +215,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
           FLAG_young_generation_large_objects &&
           MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
     DCHECK_EQ(NEW_LO_SPACE,
-              MemoryChunk::FromHeapObject(object)->owner()->identity());
+              MemoryChunk::FromHeapObject(object)->owner_identity());
     if (object.map_slot().Release_CompareAndSwap(
             map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
       surviving_new_large_objects_.insert({object, map});
...
@@ -413,7 +413,7 @@ void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
 }
 
 void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
-  AllocationSpace space = page->owner()->identity();
+  AllocationSpace space = page->owner_identity();
   if ((space == OLD_SPACE) && !page->SweepingDone()) {
     heap()->mark_compact_collector()->sweeper()->AddPage(
         space, reinterpret_cast<Page*>(page),
...
@@ -234,14 +234,21 @@ void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 MemoryChunk* from,
                                                 MemoryChunk* to,
                                                 size_t amount) {
+  DCHECK_NOT_NULL(from->owner());
+  DCHECK_NOT_NULL(to->owner());
   base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
   base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
   Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
                                        amount);
 }
 
+AllocationSpace MemoryChunk::owner_identity() const {
+  if (InReadOnlySpace()) return RO_SPACE;
+  return owner()->identity();
+}
+
 void Page::MarkNeverAllocateForTesting() {
-  DCHECK(this->owner()->identity() != NEW_SPACE);
+  DCHECK(this->owner_identity() != NEW_SPACE);
   DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
   SetFlag(NEVER_ALLOCATE_ON_PAGE);
   SetFlag(NEVER_EVACUATE);
@@ -376,7 +383,7 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
 }
 
 AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
-  DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+  DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
   if (!EnsureLinearAllocationArea(size_in_bytes)) {
     return AllocationResult::Retry(identity());
   }
@@ -389,7 +396,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
 AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                 AllocationAlignment alignment) {
   DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
-  DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+  DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
   int allocation_size = size_in_bytes;
   HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
   if (object.is_null()) {
...
@@ -64,16 +64,16 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
       space_(reinterpret_cast<PagedSpace*>(page->owner())),
       page_range_(page),
       current_page_(page_range_.begin()) {
-#ifdef DEBUG
-  Space* owner = page->owner();
+#ifdef V8_SHARED_RO_HEAP
   // TODO(v8:7464): Always enforce this once PagedSpace::Verify is no longer
   // used to verify read-only space for non-shared builds.
-#ifdef V8_SHARED_RO_HEAP
-  DCHECK_NE(owner->identity(), RO_SPACE);
-#endif
-  // Do not access the heap of the read-only space.
-  DCHECK(owner->identity() == RO_SPACE || owner->identity() == OLD_SPACE ||
-         owner->identity() == MAP_SPACE || owner->identity() == CODE_SPACE);
+  DCHECK(!page->InReadOnlySpace());
+#endif  // V8_SHARED_RO_HEAP
+#ifdef DEBUG
+  AllocationSpace owner = page->owner_identity();
+  DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
+         owner == CODE_SPACE);
 #endif  // DEBUG
 }
@@ -562,8 +562,7 @@ void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
   DCHECK(permission == PageAllocator::kRead ||
          permission == PageAllocator::kReadExecute);
   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
-  DCHECK(owner()->identity() == CODE_SPACE ||
-         owner()->identity() == CODE_LO_SPACE);
+  DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
   // Decrementing the write_unprotect_counter_ and changing the page
   // protection mode has to be atomic.
   base::MutexGuard guard(page_protection_change_mutex_);
@@ -597,8 +596,7 @@ void MemoryChunk::SetReadAndExecutable() {
 void MemoryChunk::SetReadAndWritable() {
   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
-  DCHECK(owner()->identity() == CODE_SPACE ||
-         owner()->identity() == CODE_LO_SPACE);
+  DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
   // Incrementing the write_unprotect_counter_ and changing the page
   // protection mode has to be atomic.
   base::MutexGuard guard(page_protection_change_mutex_);
@@ -735,6 +733,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
         ->non_atomic_marking_state()
         ->bitmap(chunk)
        ->MarkAllBits();
+    chunk->SetFlag(READ_ONLY_HEAP);
   } else {
     heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
                                                                           0);
@@ -771,9 +770,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
 Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
   Page* page = static_cast<Page*>(chunk);
-  DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
-                page->owner()->identity()),
-            page->area_size());
+  DCHECK_EQ(
+      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
+      page->area_size());
   // Make sure that categories are initialized before freeing the area.
   page->ResetAllocationStatistics();
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
@@ -862,17 +861,15 @@ Page* Page::ConvertNewToOld(Page* old_page) {
 }
 
 size_t MemoryChunk::CommittedPhysicalMemory() {
-  if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
+  if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
     return size();
   return high_water_mark_;
 }
 
-bool MemoryChunk::InOldSpace() const {
-  return owner()->identity() == OLD_SPACE;
-}
+bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
 
 bool MemoryChunk::InLargeObjectSpace() const {
-  return owner()->identity() == LO_SPACE;
+  return owner_identity() == LO_SPACE;
 }
 
 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
@@ -1191,7 +1188,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
     reservation->Free();
   } else {
     // Only read-only pages can have non-initialized reservation object.
-    DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
+    DCHECK_EQ(RO_SPACE, chunk->owner_identity());
     FreeMemory(page_allocator(chunk->executable()), chunk->address(),
                chunk->size());
   }
@@ -1547,8 +1544,6 @@ void MemoryChunk::ReleaseMarkingBitmap() {
 // PagedSpace implementation
 
 void Space::CheckOffsetsAreConsistent() const {
-  static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
-                "ID offset inconsistent");
   DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
 }
@@ -1619,6 +1614,7 @@ void PagedSpace::RefillFreeList() {
       identity() != MAP_SPACE && identity() != RO_SPACE) {
     return;
   }
+  DCHECK(!IsDetached());
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   size_t added = 0;
   {
@@ -2071,8 +2067,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
       // be in map space.
       Map map = object.map();
       CHECK(map.IsMap());
-      CHECK(isolate->heap()->map_space()->Contains(map) ||
-            ReadOnlyHeap::Contains(map));
+      CHECK(ReadOnlyHeap::Contains(map) ||
+            isolate->heap()->map_space()->Contains(map));
 
       // Perform space-specific object verification.
       VerifyObject(object);
@@ -2565,7 +2561,7 @@ void NewSpace::Verify(Isolate* isolate) {
       // be in map space or read-only space.
       Map map = object.map();
       CHECK(map.IsMap());
-      CHECK(heap()->map_space()->Contains(map) || ReadOnlyHeap::Contains(map));
+      CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
 
       // The object should not be code or a map.
       CHECK(!object.IsMap());
@@ -2960,7 +2956,7 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
       }
       if (!prev_non_evac_node.is_null()) {
         MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
-        if (chunk->owner()->identity() == CODE_SPACE) {
+        if (chunk->owner_identity() == CODE_SPACE) {
           chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
         }
         prev_non_evac_node.set_next(cur_node.next());
@@ -3383,6 +3379,7 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
     LSAN_IGNORE_OBJECT(categories_[i]);
   }
   heap_ = nullptr;
+  owner_ = nullptr;
 }
 
 void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
@@ -3619,7 +3616,7 @@ void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
 }
 
 void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
-  DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
   DCHECK(page->IsLargePage());
   DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
   DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
@@ -3727,7 +3724,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
     // in map space or read-only space.
     Map map = object.map();
     CHECK(map.IsMap());
-    CHECK(heap()->map_space()->Contains(map) || ReadOnlyHeap::Contains(map));
+    CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
 
     // We have only the following types in the large object space:
     if (!(object.IsAbstractCode() || object.IsSeqString() ||
@@ -3800,7 +3797,7 @@ void LargeObjectSpace::Print() {
 void Page::Print() {
   // Make a best-effort to print the objects in the page.
   PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
-         this->owner()->name());
+         Heap::GetSpaceName(this->owner_identity()));
   printf(" --------------------------------------\n");
   HeapObjectIterator objects(this);
   unsigned mark_size = 0;
@@ -3860,7 +3857,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
 #endif  // ENABLE_MINOR_MC
   page->InitializationMemoryFence();
   DCHECK(page->IsLargePage());
-  DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
   AllocationStep(object_size, result.address(), object_size);
   return result;
 }
...
@@ -339,7 +339,12 @@ class MemoryChunk {
     // The memory chunk freeing bookkeeping has been performed but the chunk has
     // not yet been freed.
-    UNREGISTERED = 1u << 20
+    UNREGISTERED = 1u << 20,
+
+    // The memory chunk belongs to the read-only heap and does not participate
+    // in garbage collection. This is used instead of owner for identity
+    // checking since read-only chunks have no owner once they are detached.
+    READ_ONLY_HEAP = 1u << 21,
   };
 
   using Flags = uintptr_t;
@@ -606,7 +611,7 @@ class MemoryChunk {
   }
 
   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  bool IsFlagSet(Flag flag) {
+  bool IsFlagSet(Flag flag) const {
     return (GetFlags<access_mode>() & flag) != 0;
   }
@@ -619,7 +624,7 @@ class MemoryChunk {
   // Return all current flags.
   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  uintptr_t GetFlags() {
+  uintptr_t GetFlags() const {
     if (access_mode == AccessMode::NON_ATOMIC) {
       return flags_;
     } else {
@@ -667,10 +672,24 @@ class MemoryChunk {
   bool InOldSpace() const;
   V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
 
+  // Gets the chunk's owner or null if the space has been detached.
   Space* owner() const { return owner_; }
 
   void set_owner(Space* space) { owner_ = space; }
 
+  bool InReadOnlySpace() const {
+    return IsFlagSet(MemoryChunk::READ_ONLY_HEAP);
+  }
+
+  bool IsWritable() const {
+    // If this is a read-only space chunk but heap_ is non-null, it has not yet
+    // been sealed and can be written to.
+    return !InReadOnlySpace() || heap_ != nullptr;
+  }
+
+  // Gets the chunk's allocation space, potentially dealing with a null owner_
+  // (like read-only chunks have).
+  inline AllocationSpace owner_identity() const;
+
   static inline bool HasHeaderSentinel(Address slot_addr);
 
   // Emits a memory barrier. For TSAN builds the other thread needs to perform
@@ -1024,7 +1043,8 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
     return heap_;
   }
 
-  // Identity used in error reporting.
+  bool IsDetached() const { return heap_ == nullptr; }
+
   AllocationSpace identity() { return id_; }
 
   const char* name() { return Heap::GetSpaceName(id_); }
@@ -2950,6 +2970,9 @@ class ReadOnlySpace : public PagedSpace {
   bool writable() const { return !is_marked_read_only_; }
 
+  bool Contains(Address a) = delete;
+  bool Contains(Object o) = delete;
+
   V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
 
   enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
...
@@ -184,7 +184,7 @@ void Sweeper::StartSweeperTasks() {
 void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
   if (!page->SweepingDone()) {
-    ParallelSweepPage(page, page->owner()->identity());
+    ParallelSweepPage(page, page->owner_identity());
     if (!page->SweepingDone()) {
       // We were not able to sweep that page, i.e., a concurrent
       // sweeper thread currently owns this page. Wait for the sweeper
@@ -500,7 +500,7 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
 }
 
 void Sweeper::EnsurePageIsIterable(Page* page) {
-  AllocationSpace space = page->owner()->identity();
+  AllocationSpace space = page->owner_identity();
   if (IsValidSweepingSpace(space)) {
     SweepOrWaitUntilSweepingCompleted(page);
   } else {
@@ -573,7 +573,7 @@ void Sweeper::AddPageForIterability(Page* page) {
   DCHECK(sweeping_in_progress_);
   DCHECK(iterability_in_progress_);
   DCHECK(!iterability_task_started_);
-  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+  DCHECK(IsValidIterabilitySpace(page->owner_identity()));
   DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
   iterability_list_.push_back(page);
@@ -581,7 +581,7 @@ void Sweeper::AddPageForIterability(Page* page) {
 }
 
 void Sweeper::MakeIterable(Page* page) {
-  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+  DCHECK(IsValidIterabilitySpace(page->owner_identity()));
   const FreeSpaceTreatmentMode free_space_mode =
       Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
   RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
...
@@ -96,8 +96,7 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
 }
 
 bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
-  PagedSpace* read_only_space = isolate()->heap()->read_only_space();
-  if (!read_only_space->Contains(obj)) return false;
+  if (!ReadOnlyHeap::Contains(obj)) return false;
 
   // For objects in RO_SPACE, never serialize the object, but instead create a
   // back reference that encodes the page number as the chunk_index and the
@@ -105,6 +104,7 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
   Address address = obj.address();
   Page* page = Page::FromAddress(address);
   uint32_t chunk_index = 0;
+  ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
   for (Page* p : *read_only_space) {
     if (p == page) break;
     ++chunk_index;
...
@@ -567,7 +567,7 @@ void Serializer::ObjectSerializer::SerializeObject() {
   int size = object_.Size();
   Map map = object_.map();
   AllocationSpace space =
-      MemoryChunk::FromHeapObject(object_)->owner()->identity();
+      MemoryChunk::FromHeapObject(object_)->owner_identity();
   // Young generation large objects are tenured.
   if (space == NEW_LO_SPACE) {
     space = LO_SPACE;
...
@@ -213,6 +213,7 @@ void ForceEvacuationCandidate(Page* page) {
   CHECK(FLAG_manual_evacuation_candidates_selection);
   page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
   PagedSpace* space = static_cast<PagedSpace*>(page->owner());
+  DCHECK_NOT_NULL(space);
   Address top = space->top();
   Address limit = space->limit();
   if (top < limit && Page::FromAllocationAreaAddress(top) == page) {
...
@@ -5924,7 +5924,7 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
   // TODO(hpayer): Update the test as soon as we have a tenure limit for LO.
   Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
-  CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
+  CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
   CHECK(chunk->IsFlagSet(MemoryChunk::LARGE_PAGE));
   CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
@@ -5936,7 +5936,7 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
   // After the first young generation GC array_small will be in the old
   // generation large object space.
   chunk = MemoryChunk::FromHeapObject(*array_small);
-  CHECK_EQ(LO_SPACE, chunk->owner()->identity());
+  CHECK_EQ(LO_SPACE, chunk->owner_identity());
   CHECK(!chunk->InYoungGeneration());
   CcTest::CollectAllAvailableGarbage();
@@ -5954,7 +5954,7 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
   // TODO(hpayer): Update the test as soon as we have a tenure limit for LO.
   Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
-  CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
+  CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
   CHECK(chunk->IsFlagSet(MemoryChunk::LARGE_PAGE));
   CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
@@ -5966,7 +5966,7 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
   // After the first full GC array_small will be in the old generation
   // large object space.
   chunk = MemoryChunk::FromHeapObject(*array_small);
-  CHECK_EQ(LO_SPACE, chunk->owner()->identity());
+  CHECK_EQ(LO_SPACE, chunk->owner_identity());
   CHECK(!chunk->InYoungGeneration());
   CcTest::CollectAllAvailableGarbage();
@@ -5986,7 +5986,7 @@ TEST(YoungGenerationLargeObjectAllocationReleaseScavenger) {
     for (int i = 0; i < 10; i++) {
       Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
       MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
-      CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
+      CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
      CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
    }
  }
@@ -6009,7 +6009,7 @@ TEST(UncommitUnusedLargeObjectMemory) {
   Handle<FixedArray> array =
       isolate->factory()->NewFixedArray(200000, AllocationType::kOld);
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array);
-  CHECK(chunk->owner()->identity() == LO_SPACE);
+  CHECK(chunk->owner_identity() == LO_SPACE);
   intptr_t size_before = array->Size();
   size_t committed_memory_before = chunk->CommittedPhysicalMemory();
@@ -6033,7 +6033,7 @@ TEST(RememberedSetRemoveRange) {
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(
       Page::kPageSize / kTaggedSize, AllocationType::kOld);
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array);
-  CHECK(chunk->owner()->identity() == LO_SPACE);
+  CHECK(chunk->owner_identity() == LO_SPACE);
   Address start = array->address();
   // Maps slot to boolean indicator of whether the slot should be in the set.
   std::map<Address, bool> slots;
@@ -6189,7 +6189,7 @@ HEAP_TEST(Regress5831) {
   // Ensure it's not in large object space.
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(*code);
-  CHECK(chunk->owner()->identity() != LO_SPACE);
+  CHECK(chunk->owner_identity() != LO_SPACE);
   CHECK(chunk->NeverEvacuate());
 }
...
@@ -16,8 +16,7 @@ namespace {
 AllocationSpace GetSpaceFromObject(Object object) {
   DCHECK(object.IsHeapObject());
   return MemoryChunk::FromHeapObject(HeapObject::cast(object))
-      ->owner()
-      ->identity();
+      ->owner_identity();
 }
 
 }  // namespace
...