Commit 6f267e8a authored by Dan Elphick, committed by Commit Bot

[heap] Use BasicMemoryChunk::FromHeapObject more

Since ReadOnlySpace pages will soon not be MemoryChunks, change most
uses of MemoryChunk::FromHeapObject and FromAddress to use the
BasicMemoryChunk variants, and add a MemoryChunk::cast function that
takes a BasicMemoryChunk and DCHECKs !InReadOnlySpace().

To enable this, it also moves into BasicMemoryChunk several MemoryChunk
functions that just require a BasicMemoryChunk.

Bug: v8:10454
Change-Id: I80875b2c2446937ac2c2bc9287d36e71cc050c38
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2243216
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68390}
parent 8b160ca4
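The pattern the commit introduces, as a minimal sketch condensed from the hunks below (the cast is taken from the memory-chunk.h hunk; the call-site snippet is illustrative rather than a verbatim excerpt, reusing IsFlagSet, IS_EXECUTABLE and SetReadAndWritable as they appear in the diff):

    // MemoryChunk::cast is a checked downcast: a read-only page is only a
    // BasicMemoryChunk, so treating it as a full MemoryChunk is an error.
    static MemoryChunk* cast(BasicMemoryChunk* chunk) {
      SLOW_DCHECK(!chunk->InReadOnlySpace());
      return static_cast<MemoryChunk*>(chunk);
    }

    // Call sites that only query flags stay on BasicMemoryChunk; the cast is
    // applied only where MemoryChunk-specific state is actually needed.
    BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
    if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
      MemoryChunk::cast(chunk)->SetReadAndWritable();
    }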
@@ -58,5 +58,21 @@ BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
   return chunk;
 }
+bool BasicMemoryChunk::InOldSpace() const {
+  return owner()->identity() == OLD_SPACE;
+}
+bool BasicMemoryChunk::InLargeObjectSpace() const {
+  return owner()->identity() == LO_SPACE;
+}
+#ifdef THREAD_SANITIZER
+void BasicMemoryChunk::SynchronizedHeapLoad() {
+  CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+            reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+        InReadOnlySpace());
+}
+#endif
 }  // namespace internal
 }  // namespace v8
@@ -11,6 +11,7 @@
 #include "src/common/globals.h"
 #include "src/flags/flags.h"
 #include "src/heap/marking.h"
+#include "src/objects/heap-object.h"
 #include "src/utils/allocation.h"
 namespace v8 {
@@ -186,9 +187,69 @@ class BasicMemoryChunk {
     }
   }
+  using Flags = uintptr_t;
+  static const Flags kPointersToHereAreInterestingMask =
+      POINTERS_TO_HERE_ARE_INTERESTING;
+  static const Flags kPointersFromHereAreInterestingMask =
+      POINTERS_FROM_HERE_ARE_INTERESTING;
+  static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
+  static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
+  static const Flags kIsLargePageMask = LARGE_PAGE;
+  static const Flags kSkipEvacuationSlotsRecordingMask =
+      kEvacuationCandidateMask | kIsInYoungGenerationMask;
   bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
-  // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
+  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
+  bool CanAllocate() {
+    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+  }
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+  bool IsEvacuationCandidate() {
+    DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
+             IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
+    return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+  }
+  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+  bool ShouldSkipEvacuationSlotRecording() {
+    uintptr_t flags = GetFlags<access_mode>();
+    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
+           ((flags & COMPACTION_WAS_ABORTED) == 0);
+  }
+  Executability executable() {
+    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  }
+  bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+  bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+  bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
+  bool InYoungGeneration() const {
+    return (GetFlags() & kIsInYoungGenerationMask) != 0;
+  }
+  bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
+  bool InNewLargeObjectSpace() const {
+    return InYoungGeneration() && IsLargePage();
+  }
+  bool InOldSpace() const;
+  V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+  bool IsWritable() const {
+    // If this is a read-only space chunk but heap_ is non-null, it has not yet
+    // been sealed and can be written to.
+    return !InReadOnlySpace() || heap_ != nullptr;
+  }
   bool Contains(Address addr) const {
     return addr >= area_start() && addr < area_end();
@@ -236,6 +297,12 @@ class BasicMemoryChunk {
     return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
   }
+  // Only works if the object is in the first kPageSize of the MemoryChunk.
+  static BasicMemoryChunk* FromHeapObject(HeapObject o) {
+    DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+    return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
+  }
   template <AccessMode mode>
   ConcurrentBitmap<mode>* marking_bitmap() const {
     return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
@@ -275,6 +342,13 @@ class BasicMemoryChunk {
     allocated_bytes_ -= bytes;
   }
+#ifdef THREAD_SANITIZER
+  // Perform a dummy acquire load to tell TSAN that there is no data race in
+  // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+  // release store.
+  void SynchronizedHeapLoad();
+#endif
  protected:
   // Overall size of the chunk, including the header and guards.
   size_t size_;
...
@@ -41,7 +41,7 @@ class ConcurrentMarkingState final
   explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
       : memory_chunk_data_(memory_chunk_data) {}
-  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
+  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                   reinterpret_cast<intptr_t>(chunk),
               BasicMemoryChunk::kMarkBitmapOffset);
@@ -298,7 +298,7 @@ class ConcurrentMarkingVisitor final
 #ifdef THREAD_SANITIZER
     // This is needed because TSAN does not process the memory fence
     // emitted after page initialization.
-    MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
+    BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
 #endif
   }
...
@@ -722,7 +722,7 @@ HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
                                                AllocationType allocation) {
   HeapObject result = AllocateRaw(size, allocation);
   if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
-    MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
+    BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
     chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
   }
   return result;
...
@@ -18,6 +18,7 @@
 #include "src/diagnostics/basic-block-profiler.h"
 #include "src/execution/isolate-inl.h"
 #include "src/execution/protectors-inl.h"
+#include "src/heap/basic-memory-chunk.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/incremental-marking.h"
 #include "src/heap/mark-compact-inl.h"
@@ -394,7 +395,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
   HeapObject result;
   if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
   if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
-    MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
+    BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
     chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
   }
   result.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
...
@@ -401,7 +401,8 @@ bool Heap::InYoungGeneration(MaybeObject object) {
 // static
 bool Heap::InYoungGeneration(HeapObject heap_object) {
   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
-  bool result = MemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
+  bool result =
+      BasicMemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
 #ifdef DEBUG
   // If in the young generation, then check we're either not in the middle of
   // GC or the object is in to-space.
@@ -429,7 +430,7 @@ bool Heap::InFromPage(MaybeObject object) {
 // static
 bool Heap::InFromPage(HeapObject heap_object) {
-  return MemoryChunk::FromHeapObject(heap_object)->IsFromPage();
+  return BasicMemoryChunk::FromHeapObject(heap_object)->IsFromPage();
 }
 // static
@@ -446,7 +447,7 @@ bool Heap::InToPage(MaybeObject object) {
 // static
 bool Heap::InToPage(HeapObject heap_object) {
-  return MemoryChunk::FromHeapObject(heap_object)->IsToPage();
+  return BasicMemoryChunk::FromHeapObject(heap_object)->IsToPage();
 }
 bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
@@ -456,7 +457,7 @@ Heap* Heap::FromWritableHeapObject(HeapObject obj) {
   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     return Heap::GetIsolateFromWritableObject(obj)->heap();
   }
-  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
   // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
   // find a heap. The exception is when the ReadOnlySpace is writeable, during
   // bootstrapping, so explicitly allow this case.
@@ -544,7 +545,7 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
                                 PretenuringFeedbackMap* pretenuring_feedback) {
   DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
 #ifdef DEBUG
-  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
   DCHECK_IMPLIES(chunk->IsToPage(),
                  chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
   DCHECK_IMPLIES(!chunk->InYoungGeneration(),
@@ -713,24 +714,24 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
     : chunk_(nullptr), scope_active_(false) {}
 #else
 CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
-    : CodePageMemoryModificationScope(MemoryChunk::FromHeapObject(code)) {}
+    : CodePageMemoryModificationScope(BasicMemoryChunk::FromHeapObject(code)) {}
 #endif
 CodePageMemoryModificationScope::CodePageMemoryModificationScope(
-    MemoryChunk* chunk)
+    BasicMemoryChunk* chunk)
     : chunk_(chunk),
       scope_active_(chunk_->heap()->write_protect_code_memory() &&
                     chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
   if (scope_active_) {
-    DCHECK(chunk_->owner_identity() == CODE_SPACE ||
-           (chunk_->owner_identity() == CODE_LO_SPACE));
-    chunk_->SetReadAndWritable();
+    DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
+           (chunk_->owner()->identity() == CODE_LO_SPACE));
+    MemoryChunk::cast(chunk_)->SetReadAndWritable();
   }
 }
 CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
   if (scope_active_) {
-    chunk_->SetDefaultCodePermissions();
+    MemoryChunk::cast(chunk_)->SetDefaultCodePermissions();
   }
 }
...
@@ -3029,7 +3029,7 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
 #ifdef DEBUG
 void VerifyNoNeedToClearSlots(Address start, Address end) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(start);
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(start);
   // TODO(ulan): Support verification of large pages.
   if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
   Space* space = chunk->owner();
@@ -3095,7 +3095,7 @@ bool Heap::InOffThreadSpace(HeapObject heap_object) {
 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
   return false;  // currently unsupported
 #else
-  Space* owner = MemoryChunk::FromHeapObject(heap_object)->owner();
+  Space* owner = BasicMemoryChunk::FromHeapObject(heap_object)->owner();
   if (owner->identity() == OLD_SPACE) {
     // TODO(leszeks): Should we exclude compaction spaces here?
     return static_cast<PagedSpace*>(owner)->is_off_thread_space();
@@ -3114,12 +3114,12 @@ bool Heap::IsImmovable(HeapObject object) {
     return true;
   }
-  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
   return chunk->NeverEvacuate() || IsLargeObject(object);
 }
 bool Heap::IsLargeObject(HeapObject object) {
-  return MemoryChunk::FromHeapObject(object)->IsLargePage();
+  return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
 }
 #ifdef ENABLE_SLOW_DCHECKS
@@ -3148,7 +3148,8 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
 namespace {
 bool MayContainRecordedSlots(HeapObject object) {
   // New space object do not have recorded slots.
-  if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
+  if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
+    return false;
   // Whitelist objects that definitely do not have pointers.
   if (object.IsByteArray() || object.IsFixedDoubleArray()) return false;
   // Conservatively return true for other objects.
@@ -5209,7 +5210,7 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
   if (!Heap::IsImmovable(heap_object)) {
     if (isolate()->serializer_enabled() ||
         code_space_->first_page()->Contains(heap_object.address())) {
-      MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
+      BasicMemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
     } else {
       // Discard the first code allocation, which was on a page where it could
       // be moved.
@@ -5993,14 +5994,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
   bool SkipObject(HeapObject object) override {
     if (object.IsFreeSpaceOrFiller()) return true;
-    MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+    BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
     if (reachable_.count(chunk) == 0) return true;
     return reachable_[chunk]->count(object) == 0;
   }
  private:
   bool MarkAsReachable(HeapObject object) {
-    MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+    BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
     if (reachable_.count(chunk) == 0) {
       reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
     }
@@ -6082,7 +6083,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
   Heap* heap_;
   DisallowHeapAllocation no_allocation_;
-  std::unordered_map<MemoryChunk*,
+  std::unordered_map<BasicMemoryChunk*,
                      std::unordered_set<HeapObject, Object::Hasher>*>
       reachable_;
 };
@@ -6869,7 +6870,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     return true;
   }
-  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
   heap_internals::MemoryChunk* slim_chunk =
       heap_internals::MemoryChunk::FromHeapObject(object);
@@ -6878,7 +6879,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
   CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
            slim_chunk->IsMarking());
-  AllocationSpace identity = chunk->owner_identity();
+  AllocationSpace identity = chunk->owner()->identity();
   // Generation consistency.
   CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
...
@@ -63,6 +63,7 @@ using v8::MemoryPressureLevel;
 class AllocationObserver;
 class ArrayBufferCollector;
 class ArrayBufferSweeper;
+class BasicMemoryChunk;
 class CodeLargeObjectSpace;
 class ConcurrentMarking;
 class GCIdleTimeHandler;
@@ -2409,12 +2410,12 @@ class CodePageCollectionMemoryModificationScope {
 // was registered to be executable. It can be used by concurrent threads.
 class CodePageMemoryModificationScope {
  public:
-  explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
+  explicit inline CodePageMemoryModificationScope(BasicMemoryChunk* chunk);
   explicit inline CodePageMemoryModificationScope(Code object);
   inline ~CodePageMemoryModificationScope();
  private:
-  MemoryChunk* chunk_;
+  BasicMemoryChunk* chunk_;
   bool scope_active_;
   // Disallow any GCs inside this scope, as a relocation of the underlying
...
@@ -164,7 +164,7 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
 }
 LargePage* CodeLargeObjectSpace::FindPage(Address a) {
-  const Address key = MemoryChunk::FromAddress(a)->address();
+  const Address key = BasicMemoryChunk::FromAddress(a)->address();
   auto it = chunk_map_.find(key);
   if (it != chunk_map_.end()) {
     LargePage* page = it->second;
@@ -275,7 +275,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
 }
 bool LargeObjectSpace::Contains(HeapObject object) {
-  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
   bool owned = (chunk->owner() == this);
...
@@ -65,7 +65,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
 void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
                                       HeapObject target) {
-  MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
+  BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
   MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
   if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
       !source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
@@ -76,7 +76,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
 void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
                                       HeapObjectSlot slot, HeapObject target) {
-  MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
+  BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
   if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
     RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
                                                           slot.address());
@@ -215,7 +215,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
     // Note that we know that we are at a one word filler when
     // object_start + object_size - kTaggedSize == object_start.
     if (addr != end) {
-      DCHECK_EQ(chunk_, MemoryChunk::FromAddress(end));
+      DCHECK_EQ(chunk_, BasicMemoryChunk::FromAddress(end));
       uint32_t end_mark_bit_index = chunk_->AddressToMarkbitIndex(end);
       unsigned int end_cell_index =
           end_mark_bit_index >> Bitmap::kBitsPerCellLog2;
...
@@ -1232,7 +1232,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
   inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
                                          Address slot) {
     if (value->IsStrongOrWeak()) {
-      MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
+      BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
       if (p->InYoungGeneration()) {
         DCHECK_IMPLIES(
             p->IsToPage(),
@@ -4412,7 +4412,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
   inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
                                  Address slot) final {
     if (value->IsStrongOrWeak()) {
-      MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
+      BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
       if (p->InYoungGeneration()) {
         DCHECK_IMPLIES(
             p->IsToPage(),
...
@@ -247,8 +247,10 @@ class MarkCompactCollectorBase {
 class MinorMarkingState final
     : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
  public:
-  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
-    return chunk->young_generation_bitmap<AccessMode::ATOMIC>();
+  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+      const BasicMemoryChunk* chunk) const {
+    return MemoryChunk::cast(chunk)
+        ->young_generation_bitmap<AccessMode::ATOMIC>();
   }
   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -269,8 +271,9 @@ class MinorNonAtomicMarkingState final
                            AccessMode::NON_ATOMIC> {
  public:
   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
-      const MemoryChunk* chunk) const {
-    return chunk->young_generation_bitmap<AccessMode::NON_ATOMIC>();
+      const BasicMemoryChunk* chunk) const {
+    return MemoryChunk::cast(chunk)
+        ->young_generation_bitmap<AccessMode::NON_ATOMIC>();
   }
   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -293,7 +296,8 @@ class MinorNonAtomicMarkingState final
 class MajorMarkingState final
     : public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
  public:
-  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
+  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+      const BasicMemoryChunk* chunk) const {
     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                   reinterpret_cast<intptr_t>(chunk),
               BasicMemoryChunk::kMarkBitmapOffset);
@@ -320,7 +324,8 @@ class MajorMarkingState final
 class MajorAtomicMarkingState final
     : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
  public:
-  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
+  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+      const BasicMemoryChunk* chunk) const {
     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                   reinterpret_cast<intptr_t>(chunk),
               BasicMemoryChunk::kMarkBitmapOffset);
@@ -337,7 +342,7 @@ class MajorNonAtomicMarkingState final
                            AccessMode::NON_ATOMIC> {
  public:
   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
-      const MemoryChunk* chunk) const {
+      const BasicMemoryChunk* chunk) const {
     DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                   reinterpret_cast<intptr_t>(chunk),
               BasicMemoryChunk::kMarkBitmapOffset);
...
@@ -73,11 +73,11 @@ template <typename ConcreteState, AccessMode access_mode>
 class MarkingStateBase {
  public:
   V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
-    return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj.ptr());
+    return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
   }
   // {addr} may be tagged or aligned.
-  V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
+  V8_INLINE MarkBit MarkBitFrom(BasicMemoryChunk* p, Address addr) {
     return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
         p->AddressToMarkbitIndex(addr));
   }
@@ -115,10 +115,11 @@ class MarkingStateBase {
   }
   V8_INLINE bool GreyToBlack(HeapObject obj) {
-    MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
-    MarkBit markbit = MarkBitFrom(p, obj.address());
+    BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
+    MarkBit markbit = MarkBitFrom(chunk, obj.address());
     if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
-    static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj.Size());
+    static_cast<ConcreteState*>(this)->IncrementLiveBytes(
+        MemoryChunk::cast(chunk), obj.Size());
     return true;
   }
...
@@ -82,14 +82,6 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
   return AllocatableMemoryInDataPage();
 }
-#ifdef THREAD_SANITIZER
-void MemoryChunk::SynchronizedHeapLoad() {
-  CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
-            reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
-        InReadOnlySpace());
-}
-#endif
 void MemoryChunk::InitializationMemoryFence() {
   base::SeqCst_MemoryFence();
 #ifdef THREAD_SANITIZER
@@ -171,7 +163,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
                                      Executability executable, Space* owner,
                                      VirtualMemory reservation) {
-  MemoryChunk* chunk = FromAddress(base);
+  MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(base);
   DCHECK_EQ(base, chunk->address());
   BasicMemoryChunk::Initialize(heap, base, size, area_start, area_end, owner,
                                std::move(reservation));
@@ -242,12 +234,6 @@ size_t MemoryChunk::CommittedPhysicalMemory() {
   return high_water_mark_;
 }
-bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
-bool MemoryChunk::InLargeObjectSpace() const {
-  return owner_identity() == LO_SPACE;
-}
 void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
   if (is_marking) {
     SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
...
@@ -49,23 +49,6 @@ enum RememberedSetType {
 // any heap object.
 class MemoryChunk : public BasicMemoryChunk {
  public:
-  using Flags = uintptr_t;
-  static const Flags kPointersToHereAreInterestingMask =
-      POINTERS_TO_HERE_ARE_INTERESTING;
-  static const Flags kPointersFromHereAreInterestingMask =
-      POINTERS_FROM_HERE_ARE_INTERESTING;
-  static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
-  static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
-  static const Flags kIsLargePageMask = LARGE_PAGE;
-  static const Flags kSkipEvacuationSlotsRecordingMask =
-      kEvacuationCandidateMask | kIsInYoungGenerationMask;
   // |kDone|: The page state when sweeping is complete or sweeping must not be
   // performed on that page. Sweeper threads that are done with their work
   // will set this value and not touch the page anymore.
@@ -112,13 +95,23 @@ class MemoryChunk : public BasicMemoryChunk {
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
-    return reinterpret_cast<MemoryChunk*>(BasicMemoryChunk::FromAddress(a));
+    return cast(BasicMemoryChunk::FromAddress(a));
   }
   // Only works if the object is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromHeapObject(HeapObject o) {
     DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
-    return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
+    return cast(BasicMemoryChunk::FromHeapObject(o));
+  }
+  static MemoryChunk* cast(BasicMemoryChunk* chunk) {
+    SLOW_DCHECK(!chunk->InReadOnlySpace());
+    return static_cast<MemoryChunk*>(chunk);
+  }
+  static const MemoryChunk* cast(const BasicMemoryChunk* chunk) {
+    SLOW_DCHECK(!chunk->InReadOnlySpace());
+    return static_cast<const MemoryChunk*>(chunk);
   }
   size_t buckets() const { return SlotSet::BucketsForSize(size()); }
@@ -146,13 +139,6 @@ class MemoryChunk : public BasicMemoryChunk {
     return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
   }
-#ifdef THREAD_SANITIZER
-  // Perform a dummy acquire load to tell TSAN that there is no data race in
-  // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
-  // release store.
-  void SynchronizedHeapLoad();
-#endif
   template <RememberedSetType type>
   bool ContainsSlots() {
     return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
@@ -247,56 +233,6 @@ class MemoryChunk : public BasicMemoryChunk {
     return external_backing_store_bytes_[type];
   }
-  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
-  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
-  bool CanAllocate() {
-    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
-  }
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  bool IsEvacuationCandidate() {
-    DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
-             IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
-    return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
-  }
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
-  bool ShouldSkipEvacuationSlotRecording() {
-    uintptr_t flags = GetFlags<access_mode>();
-    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
-           ((flags & COMPACTION_WAS_ABORTED) == 0);
-  }
-  Executability executable() {
-    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-  }
-  bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
-  bool IsToPage() const { return IsFlagSet(TO_PAGE); }
-  bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
-  bool InYoungGeneration() const {
-    return (GetFlags() & kIsInYoungGenerationMask) != 0;
-  }
-  bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
-  bool InNewLargeObjectSpace() const {
-    return InYoungGeneration() && IsLargePage();
-  }
-  bool InOldSpace() const;
-  V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
-  // Gets the chunk's owner or null if the space has been detached.
-  Space* owner() const { return owner_; }
-  void set_owner(Space* space) { owner_ = space; }
-  bool IsWritable() const {
-    // If this is a read-only space chunk but heap_ is non-null, it has not yet
-    // been sealed and can be written to.
-    return !InReadOnlySpace() || heap_ != nullptr;
-  }
   // Gets the chunk's allocation space, potentially dealing with a null owner_
   // (like read-only chunks have).
   inline AllocationSpace owner_identity() const;
...
@@ -17,7 +17,7 @@ namespace internal {
 // SemiSpace
 bool SemiSpace::Contains(HeapObject o) const {
-  MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
+  BasicMemoryChunk* memory_chunk = BasicMemoryChunk::FromHeapObject(o);
   if (memory_chunk->IsLargePage()) return false;
   return id_ == kToSpace ? memory_chunk->IsToPage()
                          : memory_chunk->IsFromPage();
@@ -29,7 +29,7 @@ bool SemiSpace::Contains(Object o) const {
 bool SemiSpace::ContainsSlow(Address a) const {
   for (const Page* p : *this) {
-    if (p == MemoryChunk::FromAddress(a)) return true;
+    if (p == BasicMemoryChunk::FromAddress(a)) return true;
   }
   return false;
 }
@@ -42,7 +42,7 @@ bool NewSpace::Contains(Object o) const {
 }
 bool NewSpace::Contains(HeapObject o) const {
-  return MemoryChunk::FromHeapObject(o)->InNewSpace();
+  return BasicMemoryChunk::FromHeapObject(o)->InNewSpace();
 }
 bool NewSpace::ContainsSlow(Address a) const {
...
@@ -731,7 +731,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
       // The object itself should look OK.
       object.ObjectVerify(isolate);
-      if (!FLAG_verify_heap_skip_remembered_set) {
+      if (identity() != RO_SPACE && !FLAG_verify_heap_skip_remembered_set) {
         isolate->heap()->VerifyRememberedSetFor(object);
       }
...
@@ -183,7 +183,7 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
 // static
 bool ReadOnlyHeap::Contains(Address address) {
-  return MemoryChunk::FromAddress(address)->InReadOnlySpace();
+  return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace();
 }
 // static
@@ -191,7 +191,7 @@ bool ReadOnlyHeap::Contains(HeapObject object) {
   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     return third_party_heap::Heap::InReadOnlySpace(object.address());
   } else {
-    return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
+    return BasicMemoryChunk::FromHeapObject(object)->InReadOnlySpace();
   }
 }
...
@@ -97,7 +97,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
   // with page initialization.
   HeapObject heap_object;
   if (object->GetHeapObject(&heap_object)) {
-    MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
+    BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
   }
 #endif
 }
@@ -211,7 +211,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
   // object_size > kMaxRegularHeapObjectSize
   if (V8_UNLIKELY(
           FLAG_young_generation_large_objects &&
-          MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
+          BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
     DCHECK_EQ(NEW_LO_SPACE,
               MemoryChunk::FromHeapObject(object)->owner_identity());
     if (object.synchronized_compare_and_swap_map_word(
...
@@ -525,7 +525,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
   target.IterateBodyFast(map, size, &visitor);
   if (map.IsJSArrayBufferMap()) {
-    DCHECK(!MemoryChunk::FromHeapObject(target)->IsLargePage());
+    DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
     JSArrayBuffer::cast(target).YoungMarkExtensionPromoted();
   }
 }
...
@@ -445,6 +445,8 @@ void CopyDictionaryToDoubleElements(Isolate* isolate, FixedArrayBase from_base,
 void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
                  uint32_t sort_size) {
+  if (sort_size == 0) return;
   // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
   // store operations that are safe for concurrent marking.
   AtomicSlot start(indices->GetFirstElementAddress());
...
@@ -420,9 +420,9 @@ int32_t String::ToArrayIndex(Address addr) {
 bool String::LooksValid() {
   // TODO(leszeks): Maybe remove this check entirely, Heap::Contains uses
   // basically the same logic as the way we access the heap in the first place.
-  MemoryChunk* chunk = MemoryChunk::FromHeapObject(*this);
   // RO_SPACE objects should always be valid.
   if (ReadOnlyHeap::Contains(*this)) return true;
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(*this);
   if (chunk->heap() == nullptr) return false;
   return chunk->heap()->Contains(*this);
 }
...
@@ -2,8 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+#include "src/common/globals.h"
+#include "src/heap/basic-memory-chunk.h"
 #include "src/heap/heap-inl.h"
-#include "src/heap/memory-chunk-inl.h"
 #include "src/objects/cell.h"
 #include "src/objects/feedback-cell.h"
 #include "src/objects/script.h"
@@ -16,8 +17,10 @@ namespace internal {
 namespace {
 AllocationSpace GetSpaceFromObject(Object object) {
   DCHECK(object.IsHeapObject());
-  return MemoryChunk::FromHeapObject(HeapObject::cast(object))
-      ->owner_identity();
+  BasicMemoryChunk* chunk =
+      BasicMemoryChunk::FromHeapObject(HeapObject::cast(object));
+  if (chunk->InReadOnlySpace()) return RO_SPACE;
+  return chunk->owner()->identity();
 }
 }  // namespace
...
@@ -301,13 +301,13 @@ TEST_F(SpacesTest, OffThreadLargeObjectSpaceMergeDuringIncrementalMarking) {
 TEST_F(SpacesTest, WriteBarrierFromHeapObject) {
   constexpr Address address1 = Page::kPageSize;
   HeapObject object1 = HeapObject::unchecked_cast(Object(address1));
-  MemoryChunk* chunk1 = MemoryChunk::FromHeapObject(object1);
+  BasicMemoryChunk* chunk1 = BasicMemoryChunk::FromHeapObject(object1);
   heap_internals::MemoryChunk* slim_chunk1 =
       heap_internals::MemoryChunk::FromHeapObject(object1);
   EXPECT_EQ(static_cast<void*>(chunk1), static_cast<void*>(slim_chunk1));
   constexpr Address address2 = 2 * Page::kPageSize - 1;
   HeapObject object2 = HeapObject::unchecked_cast(Object(address2));
-  MemoryChunk* chunk2 = MemoryChunk::FromHeapObject(object2);
+  BasicMemoryChunk* chunk2 = BasicMemoryChunk::FromHeapObject(object2);
   heap_internals::MemoryChunk* slim_chunk2 =
       heap_internals::MemoryChunk::FromHeapObject(object2);
   EXPECT_EQ(static_cast<void*>(chunk2), static_cast<void*>(slim_chunk2));
...