Commit 8a437788 authored by Maciej Goszczycki, committed by Commit Bot

[roheap] Set owner_ to nullptr for read-only chunks

As it stands, most of the ReadOnlySpace class's methods are unusable once it
has been sealed, since all of its pages are read-only. Set owner_ to nullptr
to ensure nothing uses it unintentionally.

This also helps with separating ReadOnlySpace from the Heap class in the
future, as ReadOnlySpace might eventually not inherit from Space.

Bug: v8:7464
Change-Id: I3b24f20c644d6f5e23647bc1de4d256a20a0eb19
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1637800
Reviewed-by: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
Cr-Commit-Position: refs/heads/master@{#61979}
parent 780c8699
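
For orientation, the hunks below all follow one pattern. A consolidated sketch of the new MemoryChunk helpers, pulled together from the spaces.h/spaces.cc changes in this diff (surrounding class declarations omitted), looks roughly like this:

// Sketch only: READ_ONLY_HEAP, InReadOnlySpace(), IsWritable() and
// owner_identity() mirror the spaces.h/spaces.cc hunks in this commit.
bool MemoryChunk::InReadOnlySpace() const {
  // Read-only chunks are identified by a flag rather than by owner_,
  // since owner_ is nullptr once the chunk is detached.
  return IsFlagSet(MemoryChunk::READ_ONLY_HEAP);
}

bool MemoryChunk::IsWritable() const {
  // A read-only chunk whose heap_ is still set has not been sealed yet
  // (e.g. during bootstrapping) and may therefore be written to.
  return !InReadOnlySpace() || heap_ != nullptr;
}

AllocationSpace MemoryChunk::owner_identity() const {
  // Answer RO_SPACE from the flag instead of dereferencing a possibly
  // null owner().
  if (InReadOnlySpace()) return RO_SPACE;
  return owner()->identity();
}

Call sites throughout the rest of the diff then replace chunk->owner()->identity() with chunk->owner_identity(), which remains valid after owner_ has been cleared.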
......@@ -263,15 +263,13 @@ void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
}
bool Heap::CanAllocateInReadOnlySpace() {
return !deserialization_complete_ &&
(isolate()->serializer_enabled() ||
!isolate()->initialized_from_snapshot());
return read_only_space()->writable();
}
void Heap::UpdateAllocationsHash(HeapObject object) {
Address object_address = object.address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
AllocationSpace allocation_space = memory_chunk->owner()->identity();
AllocationSpace allocation_space = memory_chunk->owner_identity();
STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
uint32_t value =
......@@ -379,8 +377,7 @@ Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
static_cast<ReadOnlySpace*>(chunk->owner())->writable());
SLOW_DCHECK(chunk->IsWritable());
Heap* heap = chunk->heap();
SLOW_DCHECK(heap != nullptr);
return heap;
......@@ -614,8 +611,8 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
scope_active_(chunk_->heap()->write_protect_code_memory() &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
(chunk_->owner()->identity() == CODE_LO_SPACE));
DCHECK(chunk_->owner_identity() == CODE_SPACE ||
(chunk_->owner_identity() == CODE_LO_SPACE));
chunk_->SetReadAndWritable();
}
}
......
......@@ -46,23 +46,14 @@ V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
// internals are only intended to shortcut write barrier checks.
namespace heap_internals {
struct Space {
static constexpr uintptr_t kIdOffset = 9 * kSystemPointerSize;
V8_INLINE AllocationSpace identity() {
return *reinterpret_cast<AllocationSpace*>(reinterpret_cast<Address>(this) +
kIdOffset);
}
};
struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
static constexpr uintptr_t kHeapOffset =
kFlagsOffset + kUIntptrSize + 4 * kSystemPointerSize;
static constexpr uintptr_t kOwnerOffset =
kHeapOffset + 2 * kSystemPointerSize;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
......@@ -84,13 +75,12 @@ struct MemoryChunk {
V8_INLINE Heap* GetHeap() {
Heap* heap = *reinterpret_cast<Heap**>(reinterpret_cast<Address>(this) +
kHeapOffset);
SLOW_DCHECK(heap != nullptr);
DCHECK_NOT_NULL(heap);
return heap;
}
V8_INLINE Space* GetOwner() {
return *reinterpret_cast<Space**>(reinterpret_cast<Address>(this) +
kOwnerOffset);
V8_INLINE bool InReadOnlySpace() const {
return GetFlags() & kReadOnlySpaceBit;
}
};
......@@ -246,7 +236,7 @@ inline Heap* GetHeapFromWritableObject(const HeapObject object) {
inline bool GetIsolateFromWritableObject(HeapObject obj, Isolate** isolate) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(obj);
if (chunk->GetOwner()->identity() == RO_SPACE) {
if (chunk->InReadOnlySpace()) {
*isolate = nullptr;
return false;
}
......
......@@ -3718,6 +3718,9 @@ const char* Heap::GarbageCollectionReasonToString(
}
bool Heap::Contains(HeapObject value) {
if (ReadOnlyHeap::Contains(value)) {
return false;
}
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
......@@ -5323,7 +5326,7 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
DCHECK(!IsLargeObject(object));
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->DeleteEntry(slot.address());
}
}
......@@ -5333,7 +5336,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
DCHECK(!IsLargeObject(object));
if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
......@@ -5346,7 +5349,7 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->DeleteEntry(start, end);
}
}
......@@ -5856,7 +5859,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
InstanceType type = map.instance_type();
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
AllocationSpace src = chunk->owner()->identity();
AllocationSpace src = chunk->owner_identity();
switch (src) {
case NEW_SPACE:
return dst == NEW_SPACE || dst == OLD_SPACE;
......@@ -6145,16 +6148,16 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
slim_chunk->IsMarking());
Space* chunk_owner = chunk->owner();
AllocationSpace identity = chunk_owner->identity();
AllocationSpace identity = chunk->owner_identity();
// Generation consistency.
CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
slim_chunk->InYoungGeneration());
// Read-only consistency.
CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
// Marking consistency.
if (identity != RO_SPACE ||
static_cast<ReadOnlySpace*>(chunk->owner())->writable()) {
if (chunk->IsWritable()) {
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
......@@ -6182,9 +6185,6 @@ static_assert(MemoryChunk::kFlagsOffset ==
static_assert(MemoryChunk::kHeapOffset ==
heap_internals::MemoryChunk::kHeapOffset,
"Heap offset inconsistent");
static_assert(MemoryChunk::kOwnerOffset ==
heap_internals::MemoryChunk::kOwnerOffset,
"Owner offset inconsistent");
void Heap::SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state) {
......
......@@ -990,8 +990,9 @@ class Heap {
// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);
// Checks whether an address/object in the heap (including auxiliary
// area and unused area).
// Checks whether an address/object is in the non-read-only heap (including
// auxiliary area and unused area). Use IsValidHeapObject if checking both
// heaps is required.
V8_EXPORT_PRIVATE bool Contains(HeapObject value);
// Checks whether an address/object in a space.
......
......@@ -1463,7 +1463,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
inline bool Visit(HeapObject object, int size) override {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
DCHECK(object.map_word().IsForwardingAddress());
return true;
......@@ -3183,7 +3183,7 @@ void MarkCompactCollector::Evacuate() {
sweeper()->AddPageForIterability(p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
DCHECK_EQ(OLD_SPACE, p->owner()->identity());
DCHECK_EQ(OLD_SPACE, p->owner_identity());
sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
}
}
......@@ -3191,7 +3191,7 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
......
......@@ -108,7 +108,7 @@ void ReadOnlyHeap::ClearSharedHeapForTest() {
// static
bool ReadOnlyHeap::Contains(HeapObject object) {
return Page::FromHeapObject(object)->owner()->identity() == RO_SPACE;
return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
}
// static
......
......@@ -215,7 +215,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
FLAG_young_generation_large_objects &&
MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner()->identity());
MemoryChunk::FromHeapObject(object)->owner_identity());
if (object.map_slot().Release_CompareAndSwap(
map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
surviving_new_large_objects_.insert({object, map});
......
......@@ -413,7 +413,7 @@ void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
AllocationSpace space = page->owner()->identity();
AllocationSpace space = page->owner_identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
heap()->mark_compact_collector()->sweeper()->AddPage(
space, reinterpret_cast<Page*>(page),
......
......@@ -234,14 +234,21 @@ void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
MemoryChunk* from,
MemoryChunk* to,
size_t amount) {
DCHECK_NOT_NULL(from->owner());
DCHECK_NOT_NULL(to->owner());
base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
amount);
}
AllocationSpace MemoryChunk::owner_identity() const {
if (InReadOnlySpace()) return RO_SPACE;
return owner()->identity();
}
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
SetFlag(NEVER_EVACUATE);
......@@ -376,7 +383,7 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
......@@ -389,7 +396,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
......
......@@ -64,16 +64,16 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
space_(reinterpret_cast<PagedSpace*>(page->owner())),
page_range_(page),
current_page_(page_range_.begin()) {
#ifdef DEBUG
Space* owner = page->owner();
#ifdef V8_SHARED_RO_HEAP
// TODO(v8:7464): Always enforce this once PagedSpace::Verify is no longer
// used to verify read-only space for non-shared builds.
#ifdef V8_SHARED_RO_HEAP
DCHECK_NE(owner->identity(), RO_SPACE);
#endif
// Do not access the heap of the read-only space.
DCHECK(owner->identity() == RO_SPACE || owner->identity() == OLD_SPACE ||
owner->identity() == MAP_SPACE || owner->identity() == CODE_SPACE);
DCHECK(!page->InReadOnlySpace());
#endif // V8_SHARED_RO_HEAP
#ifdef DEBUG
AllocationSpace owner = page->owner_identity();
DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
owner == CODE_SPACE);
#endif // DEBUG
}
......@@ -562,8 +562,7 @@ void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
DCHECK(permission == PageAllocator::kRead ||
permission == PageAllocator::kReadExecute);
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner()->identity() == CODE_SPACE ||
owner()->identity() == CODE_LO_SPACE);
DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
......@@ -597,8 +596,7 @@ void MemoryChunk::SetReadAndExecutable() {
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner()->identity() == CODE_SPACE ||
owner()->identity() == CODE_LO_SPACE);
DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
......@@ -735,6 +733,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
chunk->SetFlag(READ_ONLY_HEAP);
} else {
heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
0);
......@@ -771,9 +770,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
page->owner()->identity()),
page->area_size());
DCHECK_EQ(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocationStatistics();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
......@@ -862,17 +861,15 @@ Page* Page::ConvertNewToOld(Page* old_page) {
}
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
return high_water_mark_;
}
bool MemoryChunk::InOldSpace() const {
return owner()->identity() == OLD_SPACE;
}
bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
bool MemoryChunk::InLargeObjectSpace() const {
return owner()->identity() == LO_SPACE;
return owner_identity() == LO_SPACE;
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
......@@ -1191,7 +1188,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
reservation->Free();
} else {
// Only read-only pages can have non-initialized reservation object.
DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
DCHECK_EQ(RO_SPACE, chunk->owner_identity());
FreeMemory(page_allocator(chunk->executable()), chunk->address(),
chunk->size());
}
......@@ -1547,8 +1544,6 @@ void MemoryChunk::ReleaseMarkingBitmap() {
// PagedSpace implementation
void Space::CheckOffsetsAreConsistent() const {
static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
"ID offset inconsistent");
DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
}
......@@ -1619,6 +1614,7 @@ void PagedSpace::RefillFreeList() {
identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
{
......@@ -2071,8 +2067,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// be in map space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(isolate->heap()->map_space()->Contains(map) ||
ReadOnlyHeap::Contains(map));
CHECK(ReadOnlyHeap::Contains(map) ||
isolate->heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
......@@ -2565,7 +2561,7 @@ void NewSpace::Verify(Isolate* isolate) {
// be in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(heap()->map_space()->Contains(map) || ReadOnlyHeap::Contains(map));
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap());
......@@ -2960,7 +2956,7 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
}
if (!prev_non_evac_node.is_null()) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
if (chunk->owner()->identity() == CODE_SPACE) {
if (chunk->owner_identity() == CODE_SPACE) {
chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
}
prev_non_evac_node.set_next(cur_node.next());
......@@ -3383,6 +3379,7 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
LSAN_IGNORE_OBJECT(categories_[i]);
}
heap_ = nullptr;
owner_ = nullptr;
}
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
......@@ -3619,7 +3616,7 @@ void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
}
void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
......@@ -3727,7 +3724,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(heap()->map_space()->Contains(map) || ReadOnlyHeap::Contains(map));
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
if (!(object.IsAbstractCode() || object.IsSeqString() ||
......@@ -3800,7 +3797,7 @@ void LargeObjectSpace::Print() {
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
this->owner()->name());
Heap::GetSpaceName(this->owner_identity()));
printf(" --------------------------------------\n");
HeapObjectIterator objects(this);
unsigned mark_size = 0;
......@@ -3860,7 +3857,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AllocationStep(object_size, result.address(), object_size);
return result;
}
......
......@@ -339,7 +339,12 @@ class MemoryChunk {
// The memory chunk freeing bookkeeping has been performed but the chunk has
// not yet been freed.
UNREGISTERED = 1u << 20
UNREGISTERED = 1u << 20,
// The memory chunk belongs to the read-only heap and does not participate
// in garbage collection. This is used instead of owner for identity
// checking since read-only chunks have no owner once they are detached.
READ_ONLY_HEAP = 1u << 21,
};
using Flags = uintptr_t;
......@@ -606,7 +611,7 @@ class MemoryChunk {
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool IsFlagSet(Flag flag) {
bool IsFlagSet(Flag flag) const {
return (GetFlags<access_mode>() & flag) != 0;
}
......@@ -619,7 +624,7 @@ class MemoryChunk {
// Return all current flags.
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
uintptr_t GetFlags() {
uintptr_t GetFlags() const {
if (access_mode == AccessMode::NON_ATOMIC) {
return flags_;
} else {
......@@ -667,10 +672,24 @@ class MemoryChunk {
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
// Gets the chunk's owner or null if the space has been detached.
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
bool InReadOnlySpace() const {
return IsFlagSet(MemoryChunk::READ_ONLY_HEAP);
}
bool IsWritable() const {
// If this is a read-only space chunk but heap_ is non-null, it has not yet
// been sealed and can be written to.
return !InReadOnlySpace() || heap_ != nullptr;
}
// Gets the chunk's allocation space, potentially dealing with a null owner_
// (like read-only chunks have).
inline AllocationSpace owner_identity() const;
static inline bool HasHeaderSentinel(Address slot_addr);
// Emits a memory barrier. For TSAN builds the other thread needs to perform
......@@ -1024,7 +1043,8 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
return heap_;
}
// Identity used in error reporting.
bool IsDetached() const { return heap_ == nullptr; }
AllocationSpace identity() { return id_; }
const char* name() { return Heap::GetSpaceName(id_); }
......@@ -2950,6 +2970,9 @@ class ReadOnlySpace : public PagedSpace {
bool writable() const { return !is_marked_read_only_; }
bool Contains(Address a) = delete;
bool Contains(Object o) = delete;
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
......
......@@ -184,7 +184,7 @@ void Sweeper::StartSweeperTasks() {
void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
if (!page->SweepingDone()) {
ParallelSweepPage(page, page->owner()->identity());
ParallelSweepPage(page, page->owner_identity());
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
......@@ -500,7 +500,7 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
}
void Sweeper::EnsurePageIsIterable(Page* page) {
AllocationSpace space = page->owner()->identity();
AllocationSpace space = page->owner_identity();
if (IsValidSweepingSpace(space)) {
SweepOrWaitUntilSweepingCompleted(page);
} else {
......@@ -573,7 +573,7 @@ void Sweeper::AddPageForIterability(Page* page) {
DCHECK(sweeping_in_progress_);
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
DCHECK(IsValidIterabilitySpace(page->owner_identity()));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
iterability_list_.push_back(page);
......@@ -581,7 +581,7 @@ void Sweeper::AddPageForIterability(Page* page) {
}
void Sweeper::MakeIterable(Page* page) {
DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
DCHECK(IsValidIterabilitySpace(page->owner_identity()));
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
......
......@@ -96,8 +96,7 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
}
bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
PagedSpace* read_only_space = isolate()->heap()->read_only_space();
if (!read_only_space->Contains(obj)) return false;
if (!ReadOnlyHeap::Contains(obj)) return false;
// For objects in RO_SPACE, never serialize the object, but instead create a
// back reference that encodes the page number as the chunk_index and the
......@@ -105,6 +104,7 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
Address address = obj.address();
Page* page = Page::FromAddress(address);
uint32_t chunk_index = 0;
ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
for (Page* p : *read_only_space) {
if (p == page) break;
++chunk_index;
......
......@@ -567,7 +567,7 @@ void Serializer::ObjectSerializer::SerializeObject() {
int size = object_.Size();
Map map = object_.map();
AllocationSpace space =
MemoryChunk::FromHeapObject(object_)->owner()->identity();
MemoryChunk::FromHeapObject(object_)->owner_identity();
// Young generation large objects are tenured.
if (space == NEW_LO_SPACE) {
space = LO_SPACE;
......
......@@ -213,6 +213,7 @@ void ForceEvacuationCandidate(Page* page) {
CHECK(FLAG_manual_evacuation_candidates_selection);
page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
PagedSpace* space = static_cast<PagedSpace*>(page->owner());
DCHECK_NOT_NULL(space);
Address top = space->top();
Address limit = space->limit();
if (top < limit && Page::FromAllocationAreaAddress(top) == page) {
......
......@@ -5924,7 +5924,7 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
// TODO(hpayer): Update the test as soon as we have a tenure limit for LO.
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
CHECK(chunk->IsFlagSet(MemoryChunk::LARGE_PAGE));
CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
......@@ -5936,7 +5936,7 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
// After the first young generation GC array_small will be in the old
// generation large object space.
chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(LO_SPACE, chunk->owner()->identity());
CHECK_EQ(LO_SPACE, chunk->owner_identity());
CHECK(!chunk->InYoungGeneration());
CcTest::CollectAllAvailableGarbage();
......@@ -5954,7 +5954,7 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
// TODO(hpayer): Update the test as soon as we have a tenure limit for LO.
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
CHECK(chunk->IsFlagSet(MemoryChunk::LARGE_PAGE));
CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
......@@ -5966,7 +5966,7 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
// After the first full GC array_small will be in the old generation
// large object space.
chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(LO_SPACE, chunk->owner()->identity());
CHECK_EQ(LO_SPACE, chunk->owner_identity());
CHECK(!chunk->InYoungGeneration());
CcTest::CollectAllAvailableGarbage();
......@@ -5986,7 +5986,7 @@ TEST(YoungGenerationLargeObjectAllocationReleaseScavenger) {
for (int i = 0; i < 10; i++) {
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
}
}
......@@ -6009,7 +6009,7 @@ TEST(UncommitUnusedLargeObjectMemory) {
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(200000, AllocationType::kOld);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array);
CHECK(chunk->owner()->identity() == LO_SPACE);
CHECK(chunk->owner_identity() == LO_SPACE);
intptr_t size_before = array->Size();
size_t committed_memory_before = chunk->CommittedPhysicalMemory();
......@@ -6033,7 +6033,7 @@ TEST(RememberedSetRemoveRange) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(
Page::kPageSize / kTaggedSize, AllocationType::kOld);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array);
CHECK(chunk->owner()->identity() == LO_SPACE);
CHECK(chunk->owner_identity() == LO_SPACE);
Address start = array->address();
// Maps slot to boolean indicator of whether the slot should be in the set.
std::map<Address, bool> slots;
......@@ -6189,7 +6189,7 @@ HEAP_TEST(Regress5831) {
// Ensure it's not in large object space.
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*code);
CHECK(chunk->owner()->identity() != LO_SPACE);
CHECK(chunk->owner_identity() != LO_SPACE);
CHECK(chunk->NeverEvacuate());
}
......
......@@ -16,8 +16,7 @@ namespace {
AllocationSpace GetSpaceFromObject(Object object) {
DCHECK(object.IsHeapObject());
return MemoryChunk::FromHeapObject(HeapObject::cast(object))
->owner()
->identity();
->owner_identity();
}
} // namespace
......