Commit 2aa8722d authored by Igor Sheludko, committed by V8 LUCI CQ

[ext-code-space] Use cage-friendly HeapObject::map() in GC

... and thus avoid the need for special handling of objects located
in external code space.

This will also allow making HeapObject::IsBlah() checks faster when
external code space is enabled.

Bug: v8:11880
Change-Id: I12d07c05451ff198f0a6182d9b5849f76015e7fb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3300140
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78104}
parent ea0270e7
......@@ -102,7 +102,7 @@ V8_INLINE Address GetPtrComprCageBaseAddress(Address on_heap_addr) {
#endif // V8_COMPRESS_POINTERS
inline PtrComprCageBase GetPtrComprCageBase(HeapObject object) {
V8_INLINE PtrComprCageBase GetPtrComprCageBase(HeapObject object) {
return GetPtrComprCageBaseFromOnHeapAddress(object.ptr());
}
......
......@@ -97,6 +97,28 @@ V8_INLINE static Isolate* GetIsolateForHeapSandbox(HeapObject object) {
#endif
}
// This is an external-code-space-friendly version of GetPtrComprCageBase(..)
// which also works for objects located in the external code space.
//
// NOTE: it's supposed to be used only for the cases where performance doesn't
// matter. For example, in debug-only code or in debugging macros.
// In production code the preferred way is to use a precomputed cage base value
// which is a result of PtrComprCageBase{isolate} or GetPtrComprCageBase()
// applied to a heap object which is known to not be a part of external code
// space.
V8_INLINE PtrComprCageBase GetPtrComprCageBaseSlow(HeapObject object) {
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    // For external-code-space builds, try to recover the Isolate from the
    // object's chunk; the Isolate pointer gives the main pointer-compression
    // cage base regardless of which cage the object lives in.
    Isolate* isolate;
    if (GetIsolateFromHeapObject(object, &isolate)) {
      return PtrComprCageBase{isolate};
    }
    // If the Isolate can't be obtained then the heap object is a read-only
    // one and therefore not a Code object, so fall back to auto-computing the
    // cage base value from the object's address below.
  }
  return GetPtrComprCageBase(object);
}
} // namespace internal
} // namespace v8
......
......@@ -17,21 +17,22 @@ namespace internal {
// Record code statistics.
void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
Isolate* isolate) {
if (object.IsScript()) {
PtrComprCageBase cage_base(isolate);
if (object.IsScript(cage_base)) {
Script script = Script::cast(object);
// Log the size of external source code.
Object source = script.source();
if (source.IsExternalString()) {
Object source = script.source(cage_base);
if (source.IsExternalString(cage_base)) {
ExternalString external_source_string = ExternalString::cast(source);
int size = isolate->external_script_source_size();
size += external_source_string.ExternalPayloadSize();
isolate->set_external_script_source_size(size);
}
} else if (object.IsAbstractCode()) {
} else if (object.IsAbstractCode(cage_base)) {
// Record code+metadata statistics.
AbstractCode abstract_code = AbstractCode::cast(object);
int size = abstract_code.SizeIncludingMetadata();
if (abstract_code.IsCode()) {
if (abstract_code.IsCode(cage_base)) {
size += isolate->code_and_metadata_size();
isolate->set_code_and_metadata_size(size);
} else {
......@@ -42,7 +43,7 @@ void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
#ifdef DEBUG
// Record code kind and code comment statistics.
isolate->code_kind_statistics()[static_cast<int>(abstract_code.kind())] +=
abstract_code.Size();
abstract_code.Size(cage_base);
CodeStatistics::CollectCodeCommentStatistics(object, isolate);
#endif
}
......
......@@ -41,8 +41,9 @@ namespace internal {
class ConcurrentMarkingState final
: public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
public:
explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
: memory_chunk_data_(memory_chunk_data) {}
ConcurrentMarkingState(PtrComprCageBase cage_base,
MemoryChunkDataMap* memory_chunk_data)
: MarkingStateBase(cage_base), memory_chunk_data_(memory_chunk_data) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
......@@ -95,7 +96,7 @@ class ConcurrentMarkingVisitor final
mark_compact_epoch, code_flush_mode,
embedder_tracing_enabled,
should_keep_ages_unchanged),
marking_state_(memory_chunk_data),
marking_state_(heap->isolate(), memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
template <typename T>
......
......@@ -207,7 +207,7 @@ inline bool IsReadOnlyHeapObject(HeapObject object) {
return chunk->InReadOnlySpace();
}
inline bool IsCodeObject(HeapObject object) {
inline bool IsCodeSpaceObject(HeapObject object) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
return chunk->InCodeSpace();
......
......@@ -3870,16 +3870,18 @@ class SlotCollectingVisitor final : public ObjectVisitor {
void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
if (!FLAG_verify_heap) return;
PtrComprCageBase cage_base(isolate());
// Check that Heap::NotifyObjectLayoutChange was called for object transitions
// that are not safe for concurrent marking.
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
if (object.IsJSObject()) {
if (object.IsJSObject(cage_base)) {
// Without double unboxing all in-object fields of a JSObject are tagged.
return;
}
if (object.IsString() &&
if (object.IsString(cage_base) &&
(new_map == ReadOnlyRoots(this).thin_string_map() ||
new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
// When transitioning a string to ThinString,
......@@ -3887,7 +3889,7 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// tagged fields are introduced.
return;
}
if (FLAG_shared_string_table && object.IsString() &&
if (FLAG_shared_string_table && object.IsString(cage_base) &&
InstanceTypeChecker::IsInternalizedString(new_map.instance_type())) {
// In-place internalization does not change a string's fields.
//
......@@ -3898,12 +3900,12 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
}
// Check that the set of slots before and after the transition match.
SlotCollectingVisitor old_visitor;
object.IterateFast(&old_visitor);
MapWord old_map_word = object.map_word(kRelaxedLoad);
object.IterateFast(cage_base, &old_visitor);
MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
// Temporarily set the new map to iterate new slots.
object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
SlotCollectingVisitor new_visitor;
object.IterateFast(&new_visitor);
object.IterateFast(cage_base, &new_visitor);
// Restore the old map.
object.set_map_word(old_map_word, kRelaxedStore);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
......@@ -4627,8 +4629,9 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
// In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
chunk->mutex());
PtrComprCageBase cage_base(isolate());
Address start = object.address();
Address end = start + object.Size();
Address end = start + object.Size(cage_base);
std::set<Address> old_to_new;
std::set<std::pair<SlotType, Address>> typed_old_to_new;
if (!InYoungGeneration(object)) {
......@@ -4636,7 +4639,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
OldToNewSlotVerifyingVisitor visitor(isolate(), &old_to_new,
&typed_old_to_new,
&this->ephemeron_remembered_set_);
object.IterateBody(&visitor);
object.IterateBody(cage_base, &visitor);
}
// TODO(v8:11797): Add old to old slot set verification once all weak objects
// have their own instance types and slots are recorded for all weak fields.
......@@ -4728,7 +4731,13 @@ void Heap::IterateSmiRoots(RootVisitor* v) {
// the sweeper might actually free the underlying page).
class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
public:
explicit ClearStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
explicit ClearStaleLeftTrimmedHandlesVisitor(Heap* heap)
: heap_(heap)
#if V8_COMPRESS_POINTERS
,
cage_base_(heap->isolate())
#endif // V8_COMPRESS_POINTERS
{
USE(heap_);
}
......@@ -4744,20 +4753,32 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
}
// The pointer compression cage base value used for decompression of all
// tagged values except references to Code objects.
PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
#else
return PtrComprCageBase{};
#endif // V8_COMPRESS_POINTERS
}
private:
inline void FixHandle(FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
HeapObject current = HeapObject::cast(*p);
if (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
current.IsFreeSpaceOrFiller()) {
if (!current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
current.IsFreeSpaceOrFiller(cage_base())) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
while (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
current.IsFreeSpaceOrFiller()) {
while (
!current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
current.IsFreeSpaceOrFiller(cage_base())) {
Address next = current.ptr();
if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
if (current.map(cage_base()) ==
ReadOnlyRoots(heap_).one_pointer_filler_map()) {
next += kTaggedSize;
} else if (current.map() ==
} else if (current.map(cage_base()) ==
ReadOnlyRoots(heap_).two_pointer_filler_map()) {
next += 2 * kTaggedSize;
} else {
......@@ -4765,14 +4786,19 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
current = HeapObject::cast(Object(next));
}
DCHECK(current.map_word(kRelaxedLoad).IsForwardingAddress() ||
current.IsFixedArrayBase());
DCHECK(
current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() ||
current.IsFixedArrayBase(cage_base()));
#endif // DEBUG
p.store(Smi::zero());
}
}
Heap* heap_;
#if V8_COMPRESS_POINTERS
const PtrComprCageBase cage_base_;
#endif // V8_COMPRESS_POINTERS
};
void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
......@@ -6413,7 +6439,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
: ObjectVisitorWithCageBases(filter->heap_), filter_(filter) {}
void VisitMapPointer(HeapObject object) override {
MarkHeapObject(Map::unchecked_cast(object.map()));
MarkHeapObject(Map::unchecked_cast(object.map(cage_base())));
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
......@@ -6453,7 +6479,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
while (!marking_stack_.empty()) {
HeapObject obj = marking_stack_.back();
marking_stack_.pop_back();
obj.Iterate(this);
obj.Iterate(cage_base(), this);
}
}
......@@ -6962,7 +6988,7 @@ void VerifyPointersVisitor::VerifyPointers(HeapObject host,
// to one of objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
// this by moving that object to POINTER_VISITOR_ID_LIST.
DCHECK_EQ(ObjectFields::kMaybePointers,
Map::ObjectFieldsFrom(host.map().visitor_id()));
Map::ObjectFieldsFrom(host.map(cage_base()).visitor_id()));
VerifyPointersImpl(start, end);
}
......
......@@ -897,7 +897,6 @@ class Heap {
}
inline Isolate* isolate();
inline const Isolate* isolate() const;
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_.get();
......
......@@ -47,13 +47,15 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
incremental_marking_->EnsureBlackAllocated(addr, size);
}
IncrementalMarking::IncrementalMarking(Heap* heap,
WeakObjects* weak_objects)
IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
: heap_(heap),
collector_(heap->mark_compact_collector()),
weak_objects_(weak_objects),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
old_generation_observer_(this, kOldGenerationAllocatedThreshold),
marking_state_(heap->isolate()),
atomic_marking_state_(heap->isolate()),
non_atomic_marking_state_(heap->isolate()) {
SetState(STOPPED);
}
......@@ -436,6 +438,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
PtrComprCageBase cage_base(heap_->isolate());
collector_->marking_worklists()->Update(
[
#ifdef DEBUG
......@@ -445,11 +448,11 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
#ifdef ENABLE_MINOR_MC
minor_marking_state,
#endif
filler_map](HeapObject obj, HeapObject* out) -> bool {
cage_base, filler_map](HeapObject obj, HeapObject* out) -> bool {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
MapWord map_word = obj.map_word(kRelaxedLoad);
MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist
// anymore, e.g. left trimmed objects or objects from the root set
......@@ -490,10 +493,10 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj),
obj.IsFreeSpaceOrFiller());
obj.IsFreeSpaceOrFiller(cage_base));
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
if (obj.map() != filler_map) {
if (obj.map(cage_base) != filler_map) {
*out = obj;
return true;
}
......
......@@ -267,7 +267,8 @@ void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
size_t object_size = static_cast<size_t>(page->GetObject().Size());
PtrComprCageBase cage_base(heap()->isolate());
size_t object_size = static_cast<size_t>(page->GetObject().Size(cage_base));
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
page->ClearFlag(MemoryChunk::FROM_PAGE);
AddPage(page, object_size);
......@@ -300,11 +301,12 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
// Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC.
size_t surviving_object_size = 0;
PtrComprCageBase cage_base(heap()->isolate());
while (current) {
LargePage* next_current = current->next_page();
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
size_t size = static_cast<size_t>(object.Size());
size_t size = static_cast<size_t>(object.Size(cage_base));
if (marking_state->IsBlack(object)) {
Address free_start;
surviving_object_size += size;
......@@ -316,7 +318,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(
current, free_start, bytes_to_free,
current->area_start() + object.Size());
current->area_start() + object.Size(cage_base));
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
......@@ -406,7 +408,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// Byte arrays and strings don't have interior pointers.
if (object.IsAbstractCode(cage_base)) {
VerifyPointersVisitor code_visitor(heap());
object.IterateBody(map, object.Size(), &code_visitor);
object.IterateBody(map, object.Size(cage_base), &code_visitor);
} else if (object.IsFixedArray(cage_base)) {
FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array.length(); j++) {
......@@ -520,11 +522,12 @@ void NewLargeObjectSpace::FreeDeadObjects(
bool is_marking = heap()->incremental_marking()->IsMarking();
size_t surviving_object_size = 0;
bool freed_pages = false;
PtrComprCageBase cage_base(heap()->isolate());
for (auto it = begin(); it != end();) {
LargePage* page = *it;
it++;
HeapObject object = page->GetObject();
size_t size = static_cast<size_t>(object.Size());
size_t size = static_cast<size_t>(object.Size(cage_base));
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
......
......@@ -119,7 +119,9 @@ class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
VerifyRootPointers(start, end);
}
void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
void VisitMapPointer(HeapObject object) override {
VerifyMap(object.map(cage_base()));
}
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
......@@ -148,7 +150,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
if (current >= end) break;
CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
object.Iterate(this);
object.Iterate(cage_base(), this);
next_object_must_be_here_or_later = current + size;
// The object is either part of a black area of black allocation or a
// regular black object
......@@ -190,7 +192,7 @@ void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
obj.Iterate(this);
obj.Iterate(cage_base(), this);
}
}
}
......@@ -316,7 +318,9 @@ class EvacuationVerifier : public ObjectVisitorWithCageBases,
VerifyRootPointers(start, end);
}
void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
void VisitMapPointer(HeapObject object) override {
VerifyMap(object.map(cage_base()));
}
protected:
explicit EvacuationVerifier(Heap* heap)
......@@ -347,8 +351,10 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
HeapObject object = HeapObject::FromAddress(current);
if (!object.IsFreeSpaceOrFiller(cage_base())) object.Iterate(this);
current += object.Size();
if (!object.IsFreeSpaceOrFiller(cage_base())) {
object.Iterate(cage_base(), this);
}
current += object.Size(cage_base());
}
}
......@@ -473,6 +479,8 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
state_(IDLE),
#endif
is_shared_heap_(heap->IsShared()),
marking_state_(heap->isolate()),
non_atomic_marking_state_(heap->isolate()),
sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
}
......@@ -1415,6 +1423,14 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
protected:
enum MigrationMode { kFast, kObserved };
  // Returns the pointer compression cage base derived from the visitor's
  // heap; when pointer compression is disabled this is a default (null)
  // PtrComprCageBase, which decompression helpers treat as a no-op base.
  PtrComprCageBase cage_base() {
#if V8_COMPRESS_POINTERS
    return PtrComprCageBase{heap_->isolate()};
#else
    return PtrComprCageBase{};
#endif  // V8_COMPRESS_POINTERS
  }
using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
HeapObject src, int size,
AllocationSpace dest);
......@@ -1424,7 +1440,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
HeapObject src, int size, AllocationSpace dest) {
Address dst_addr = dst.address();
Address src_addr = src.address();
DCHECK(base->heap_->AllowedToBeMigrated(src.map(), src, dest));
PtrComprCageBase cage_base = base->cage_base();
DCHECK(base->heap_->AllowedToBeMigrated(src.map(cage_base), src, dest));
DCHECK_NE(dest, LO_SPACE);
DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
......@@ -1433,7 +1450,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
......@@ -1444,7 +1461,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
code.Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
......@@ -1473,7 +1490,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (FLAG_stress_compaction && AbortCompactionForTesting(object))
return false;
#endif // DEBUG
Map map = object.map();
Map map = object.map(cage_base());
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation;
if (ShouldPromoteIntoSharedHeap(map)) {
......@@ -1682,7 +1699,9 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
} else if (mode == NEW_TO_OLD) {
object.IterateBodyFast(record_visitor_);
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
object.IterateBodyFast(cage_base, record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
record_visitor_->MarkArrayBufferExtensionPromoted(object);
}
......@@ -1710,7 +1729,8 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
DCHECK(object.map_word(kRelaxedLoad).IsForwardingAddress());
DCHECK(object.map_word(heap_->isolate(), kRelaxedLoad)
.IsForwardingAddress());
return true;
}
return false;
......@@ -1724,7 +1744,9 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
object.IterateBodyFast(&visitor);
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
object.IterateBodyFast(cage_base, &visitor);
return true;
}
......@@ -1767,7 +1789,7 @@ void MarkCompactCollector::RevisitObject(HeapObject obj) {
DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
marking_visitor_->Visit(obj.map(), obj);
marking_visitor_->Visit(obj.map(marking_visitor_->cage_base()), obj);
}
bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
......@@ -2078,7 +2100,8 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
PtrComprCageBase cage_base(isolate);
Code::BodyDescriptor::IterateBody(code.map(cage_base), code, visitor);
}
return;
}
......@@ -3914,7 +3937,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
PointersUpdatingVisitor visitor(heap_);
for (Address cur = start_; cur < end_;) {
HeapObject object = HeapObject::FromAddress(cur);
Map map = object.map();
Map map = object.map(visitor.cage_base());
int size = object.SizeFromMap(map);
object.IterateBodyFast(map, size, &visitor);
cur += size;
......@@ -3929,7 +3952,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
PointersUpdatingVisitor visitor(heap_);
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
object_and_size.first.IterateBodyFast(&visitor);
object_and_size.first.IterateBodyFast(visitor.cage_base(), &visitor);
}
}
......@@ -4708,6 +4731,8 @@ void MinorMarkCompactCollector::TearDown() {}
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
marking_state_(heap->isolate()),
non_atomic_marking_state_(heap->isolate()),
main_marking_visitor_(new YoungGenerationMarkingVisitor(
heap->isolate(), marking_state(), worklist_, kMainMarker)),
page_parallel_job_semaphore_(0) {
......@@ -4969,7 +4994,8 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
Map map = object.map(kAcquireLoad);
PtrComprCageBase cage_base(p->heap()->isolate());
Map map = object.map(cage_base, kAcquireLoad);
int size = object.SizeFromMap(map);
free_start = free_end + size;
}
......
......@@ -243,6 +243,9 @@ class MarkCompactCollectorBase {
class MinorMarkingState final
: public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
public:
explicit MinorMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return MemoryChunk::cast(chunk)
......@@ -266,6 +269,9 @@ class MinorNonAtomicMarkingState final
: public MarkingStateBase<MinorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
explicit MinorNonAtomicMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return MemoryChunk::cast(chunk)
......@@ -292,6 +298,9 @@ class MinorNonAtomicMarkingState final
class MajorMarkingState final
: public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
public:
explicit MajorMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
......@@ -317,6 +326,9 @@ class MajorMarkingState final
class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
explicit MajorAtomicMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
......@@ -331,6 +343,9 @@ class MajorNonAtomicMarkingState final
: public MarkingStateBase<MajorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
explicit MajorNonAtomicMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
......@@ -779,11 +794,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
std::vector<std::pair<Address, Page*>>
aborted_evacuation_candidates_due_to_flags_;
Sweeper* sweeper_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
Sweeper* sweeper_;
// Counts the number of major mark-compact collections. The counter is
// incremented right after marking. This is used for:
// - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
......@@ -878,14 +893,14 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
MarkingWorklist* worklist_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_;
std::vector<Page*> sweep_to_iterate_pages_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingJob;
friend class YoungGenerationMarkingVisitor;
......
......@@ -27,6 +27,7 @@ MarkingBarrier::MarkingBarrier(Heap* heap)
collector_(heap_->mark_compact_collector()),
incremental_marking_(heap_->incremental_marking()),
worklist_(collector_->marking_worklists()->shared()),
marking_state_(heap_->isolate()),
is_main_thread_barrier_(true),
is_shared_heap_(heap_->IsShared()) {}
......@@ -35,6 +36,7 @@ MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
collector_(heap_->mark_compact_collector()),
incremental_marking_(nullptr),
worklist_(collector_->marking_worklists()->shared()),
marking_state_(heap_->isolate()),
is_main_thread_barrier_(false),
is_shared_heap_(heap_->IsShared()) {}
......
......@@ -26,6 +26,23 @@ struct EphemeronMarking {
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
explicit MarkingStateBase(PtrComprCageBase cage_base)
#if V8_COMPRESS_POINTERS
: cage_base_(cage_base)
#endif
{
}
// The pointer compression cage base value used for decompression of all
// tagged values except references to Code objects.
V8_INLINE PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
return cage_base_;
#else
return PtrComprCageBase{};
#endif // V8_COMPRESS_POINTERS
}
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
}
......@@ -73,7 +90,7 @@ class MarkingStateBase {
MarkBit markbit = MarkBitFrom(chunk, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
static_cast<ConcreteState*>(this)->IncrementLiveBytes(
MemoryChunk::cast(chunk), obj.Size());
MemoryChunk::cast(chunk), obj.Size(cage_base()));
return true;
}
......@@ -85,6 +102,11 @@ class MarkingStateBase {
static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}
private:
#if V8_COMPRESS_POINTERS
const PtrComprCageBase cage_base_;
#endif // V8_COMPRESS_POINTERS
};
// The base class for all marking visitors. It implements marking logic with
......
......@@ -338,11 +338,12 @@ std::unique_ptr<v8::MeasureMemoryDelegate> MemoryMeasurement::DefaultDelegate(
bool NativeContextInferrer::InferForContext(Isolate* isolate, Context context,
Address* native_context) {
Map context_map = context.map(kAcquireLoad);
PtrComprCageBase cage_base(isolate);
Map context_map = context.map(cage_base, kAcquireLoad);
Object maybe_native_context =
TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
Acquire_Load(isolate, context_map);
if (maybe_native_context.IsNativeContext()) {
Acquire_Load(cage_base, context_map);
if (maybe_native_context.IsNativeContext(cage_base)) {
*native_context = maybe_native_context.ptr();
return true;
}
......
......@@ -703,6 +703,7 @@ void NewSpace::Verify(Isolate* isolate) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
PtrComprCageBase cage_base(isolate);
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
// The allocation pointer should not be in the middle of an object.
......@@ -713,23 +714,23 @@ void NewSpace::Verify(Isolate* isolate) {
// The first word should be a map, and we expect all map pointers to
// be in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap());
CHECK(!object.IsAbstractCode());
CHECK(!object.IsMap(cage_base));
CHECK(!object.IsAbstractCode(cage_base));
// The object itself should look OK.
object.ObjectVerify(isolate);
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor(heap());
int size = object.Size();
int size = object.Size(cage_base);
object.IterateBody(map, size, &visitor);
if (object.IsExternalString()) {
if (object.IsExternalString(cage_base)) {
ExternalString external_string = ExternalString::cast(object);
size_t string_size = external_string.ExternalPayloadSize();
external_space_bytes[ExternalBackingStoreType::kExternalString] +=
......
......@@ -23,6 +23,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/templates.h"
#include "src/objects/visitors.h"
#include "src/utils/memcopy.h"
#include "src/utils/ostreams.h"
......@@ -31,14 +32,15 @@ namespace internal {
static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
class FieldStatsCollector : public ObjectVisitor {
class FieldStatsCollector : public ObjectVisitorWithCageBases {
public:
FieldStatsCollector(size_t* tagged_fields_count,
FieldStatsCollector(Heap* heap, size_t* tagged_fields_count,
size_t* embedder_fields_count,
size_t* inobject_smi_fields_count,
size_t* boxed_double_fields_count,
size_t* string_data_count, size_t* raw_fields_count)
: tagged_fields_count_(tagged_fields_count),
: ObjectVisitorWithCageBases(heap),
tagged_fields_count_(tagged_fields_count),
embedder_fields_count_(embedder_fields_count),
inobject_smi_fields_count_(inobject_smi_fields_count),
boxed_double_fields_count_(boxed_double_fields_count),
......@@ -47,16 +49,16 @@ class FieldStatsCollector : public ObjectVisitor {
void RecordStats(HeapObject host) {
size_t old_pointer_fields_count = *tagged_fields_count_;
host.Iterate(this);
host.Iterate(cage_base(), this);
size_t tagged_fields_count_in_object =
*tagged_fields_count_ - old_pointer_fields_count;
int object_size_in_words = host.Size() / kTaggedSize;
int object_size_in_words = host.Size(cage_base()) / kTaggedSize;
DCHECK_LE(tagged_fields_count_in_object, object_size_in_words);
size_t raw_fields_count_in_object =
object_size_in_words - tagged_fields_count_in_object;
if (host.IsJSObject()) {
if (host.IsJSObject(cage_base())) {
JSObjectFieldStats field_stats = GetInobjectFieldStats(host.map());
// Embedder fields are already included into pointer words.
DCHECK_LE(field_stats.embedded_fields_count_,
......@@ -69,11 +71,11 @@ class FieldStatsCollector : public ObjectVisitor {
tagged_fields_count_in_object -= field_stats.smi_fields_count_;
*tagged_fields_count_ -= field_stats.smi_fields_count_;
*inobject_smi_fields_count_ += field_stats.smi_fields_count_;
} else if (host.IsHeapNumber()) {
} else if (host.IsHeapNumber(cage_base())) {
DCHECK_LE(kDoubleSize / kTaggedSize, raw_fields_count_in_object);
raw_fields_count_in_object -= kDoubleSize / kTaggedSize;
*boxed_double_fields_count_ += 1;
} else if (host.IsSeqString()) {
} else if (host.IsSeqString(cage_base())) {
int string_data = SeqString::cast(host).length(kAcquireLoad) *
(String::cast(host).IsOneByteRepresentation() ? 1 : 2) /
kTaggedSize;
......@@ -456,7 +458,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()),
field_stats_collector_(
&stats->tagged_fields_count_, &stats->embedder_fields_count_,
heap_, &stats->tagged_fields_count_, &stats->embedder_fields_count_,
&stats->inobject_smi_fields_count_,
&stats->boxed_double_fields_count_, &stats->string_data_count_,
&stats->raw_fields_count_) {}
......
......@@ -29,7 +29,7 @@ HeapObject PagedSpaceObjectIterator::Next() {
HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
HeapObject obj = HeapObject::FromAddress(cur_addr_);
const int obj_size = obj.Size();
const int obj_size = obj.Size(cage_base());
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller(cage_base())) {
......
......@@ -738,8 +738,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map map = object.map();
CHECK(map.IsMap());
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) ||
isolate->heap()->map_space()->Contains(map));
......@@ -754,7 +754,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
}
// All the interior pointers should be contained in the heap.
int size = object.Size();
int size = object.Size(cage_base);
object.IterateBody(map, size, visitor);
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
......@@ -793,6 +793,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
void PagedSpace::VerifyLiveBytes() {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
PtrComprCageBase cage_base(heap()->isolate());
for (Page* page : *this) {
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(heap(), this, page);
......@@ -800,7 +801,7 @@ void PagedSpace::VerifyLiveBytes() {
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
black_size += object.Size();
black_size += object.Size(cage_base);
}
}
CHECK_LE(black_size, marking_state->live_bytes(page));
......@@ -812,6 +813,7 @@ void PagedSpace::VerifyLiveBytes() {
void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
size_t total_capacity = 0;
size_t total_allocated = 0;
PtrComprCageBase cage_base(heap->isolate());
for (Page* page : *this) {
DCHECK(page->SweepingDone());
total_capacity += page->area_size();
......@@ -819,7 +821,7 @@ void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFreeSpaceOrFiller()) {
real_allocated += object.Size();
real_allocated += object.Size(cage_base);
}
}
total_allocated += page->allocated_bytes();
......
......@@ -136,7 +136,7 @@ Address SkipFillers(PtrComprCageBase cage_base, HeapObject filler,
while (addr < end) {
filler = HeapObject::FromAddress(addr);
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
addr = filler.address() + filler.Size();
addr = filler.address() + filler.Size(cage_base);
}
return addr;
}
......@@ -184,7 +184,7 @@ size_t Page::ShrinkToHighWaterMark() {
this, address() + size() - unused, unused, area_end() - unused);
if (filler.address() != area_end()) {
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
CHECK_EQ(filler.address() + filler.Size(), area_end());
CHECK_EQ(filler.address() + filler.Size(cage_base), area_end());
}
}
return unused;
......
......@@ -122,9 +122,10 @@ class HeapObject : public Object {
// If it's not performance critical iteration use the non-templatized
// version.
void Iterate(ObjectVisitor* v);
void Iterate(PtrComprCageBase cage_base, ObjectVisitor* v);
template <typename ObjectVisitor>
inline void IterateFast(ObjectVisitor* v);
inline void IterateFast(PtrComprCageBase cage_base, ObjectVisitor* v);
// Iterates over all pointers contained in the object except the
// first map pointer. The object type is given in the first
......@@ -132,11 +133,12 @@ class HeapObject : public Object {
// object, and so is safe to call while the map pointer is modified.
// If it's not performance critical iteration use the non-templatized
// version.
void IterateBody(ObjectVisitor* v);
inline void IterateBody(ObjectVisitor* v);
void IterateBody(PtrComprCageBase cage_base, ObjectVisitor* v);
void IterateBody(Map map, int object_size, ObjectVisitor* v);
template <typename ObjectVisitor>
inline void IterateBodyFast(ObjectVisitor* v);
inline void IterateBodyFast(PtrComprCageBase cage_base, ObjectVisitor* v);
template <typename ObjectVisitor>
inline void IterateBodyFast(Map map, int object_size, ObjectVisitor* v);
......@@ -147,7 +149,7 @@ class HeapObject : public Object {
V8_EXPORT_PRIVATE bool IsValidSlot(Map map, int offset);
// Returns the heap object's size in bytes
inline int Size() const;
DECL_GETTER(Size, int)
// Given a heap object's map pointer, returns the heap size in bytes
// Useful when the map pointer field is used for other purposes.
......
......@@ -116,7 +116,7 @@ HEAP_OBJECT_TYPE_LIST(DECL_TYPE)
/* the main pointer compression cage. */ \
bool HeapObject::Is##type(PtrComprCageBase cage_base) const { \
if (V8_EXTERNAL_CODE_SPACE_BOOL) { \
if (IsCodeObject(*this)) { \
if (IsCodeSpaceObject(*this)) { \
/* Code space contains only Code objects and free space fillers. */ \
if (std::is_same<InstanceTypeTraits::type, \
InstanceTypeTraits::Code>::value || \
......
......@@ -1242,14 +1242,14 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
}
template <typename ObjectVisitor>
void HeapObject::IterateFast(ObjectVisitor* v) {
void HeapObject::IterateFast(PtrComprCageBase cage_base, ObjectVisitor* v) {
v->VisitMapPointer(*this);
IterateBodyFast(v);
IterateBodyFast(cage_base, v);
}
template <typename ObjectVisitor>
void HeapObject::IterateBodyFast(ObjectVisitor* v) {
Map m = map();
void HeapObject::IterateBodyFast(PtrComprCageBase cage_base, ObjectVisitor* v) {
Map m = map(cage_base);
IterateBodyFast(m, SizeFromMap(m), v);
}
......
......@@ -752,20 +752,11 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots(PtrComprCageBase cage_base) const {
}
// Slow, external-code-space-friendly map accessor. Prefer the
// PtrComprCageBase overload in performance-sensitive code.
Map HeapObject::map() const {
  // TODO(v8:11880): Ensure that cage friendly version is used for the cases
  // when this could be a Code object. Add
  // DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
  // and use GetPtrComprCageBase(*this) here.
  PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(*this);
  return HeapObject::map(cage_base);
}
Map HeapObject::map(PtrComprCageBase cage_base) const {
......@@ -845,20 +836,11 @@ ObjectSlot HeapObject::map_slot() const {
}
// Relaxed load of the map word; uses the slow cage-base computation so it
// also works for objects located in the external code space.
MapWord HeapObject::map_word(RelaxedLoadTag tag) const {
  // TODO(v8:11880): Ensure that cage friendly version is used for the cases
  // when this could be a Code object. Add
  // DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
  // and use GetPtrComprCageBase(*this) here.
  PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(*this);
  return HeapObject::map_word(cage_base, tag);
}
MapWord HeapObject::map_word(PtrComprCageBase cage_base,
......@@ -871,9 +853,10 @@ void HeapObject::set_map_word(MapWord map_word, RelaxedStoreTag) {
}
MapWord HeapObject::map_word(AcquireLoadTag tag) const {
  // This method is never used for objects located in code space (Code and
  // free space fillers) and thus it is fine to use auto-computed cage base
  // value.
  DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
  return HeapObject::map_word(cage_base, tag);
}
......@@ -893,7 +876,18 @@ bool HeapObject::release_compare_and_swap_map_word(MapWord old_map_word,
return result == static_cast<Tagged_t>(old_map_word.ptr());
}
// Returns the object's size in bytes. The parameterless version computes
// the cage base the slow (code-space-safe) way.
// TODO(v8:11880): consider dropping parameterless version.
int HeapObject::Size() const {
  // TODO(v8:11880): Ensure that cage friendly version is used for the cases
  // when this could be a Code object. Add
  // DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
  // and use GetPtrComprCageBase(*this) here.
  PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(*this);
  return HeapObject::Size(cage_base);
}

int HeapObject::Size(PtrComprCageBase cage_base) const {
  return SizeFromMap(map(cage_base));
}
inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
......
......@@ -2158,13 +2158,26 @@ void CallableTask::BriefPrintDetails(std::ostream& os) {
os << " callable=" << Brief(callable());
}
// TODO(v8:11880): drop this version in favor of the cage friendly one.
void HeapObject::Iterate(ObjectVisitor* v) {
  IterateFast<ObjectVisitor>(GetPtrComprCageBaseSlow(*this), v);
}

void HeapObject::Iterate(PtrComprCageBase cage_base, ObjectVisitor* v) {
  IterateFast<ObjectVisitor>(cage_base, v);
}

// TODO(v8:11880): drop this version in favor of the cage friendly one.
void HeapObject::IterateBody(ObjectVisitor* v) {
  Map m = map();
  IterateBodyFast<ObjectVisitor>(m, SizeFromMap(m), v);
}

void HeapObject::IterateBody(PtrComprCageBase cage_base, ObjectVisitor* v) {
  Map m = map(cage_base);
  IterateBodyFast<ObjectVisitor>(m, SizeFromMap(m), v);
}

void HeapObject::IterateBody(Map map, int object_size, ObjectVisitor* v) {
  IterateBodyFast<ObjectVisitor>(map, object_size, v);
}
......
......@@ -239,16 +239,18 @@ static void CheckFindCodeObject(Isolate* isolate) {
__ nop(); // supported on all architectures
PtrComprCageBase cage_base(isolate);
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
CHECK(code->IsCode());
CHECK(code->IsCode(cage_base));
HeapObject obj = HeapObject::cast(*code);
Address obj_addr = obj.address();
for (int i = 0; i < obj.Size(); i += kTaggedSize) {
for (int i = 0; i < obj.Size(cage_base); i += kTaggedSize) {
Object found = isolate->FindCodeObject(obj_addr + i);
CHECK_EQ(*code, found);
}
......@@ -256,8 +258,8 @@ static void CheckFindCodeObject(Isolate* isolate) {
Handle<Code> copy =
Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
HeapObject obj_copy = HeapObject::cast(*copy);
Object not_right =
isolate->FindCodeObject(obj_copy.address() + obj_copy.Size() / 2);
Object not_right = isolate->FindCodeObject(obj_copy.address() +
obj_copy.Size(cage_base) / 2);
CHECK(not_right != *code);
}
......@@ -1971,12 +1973,13 @@ TEST(TestSizeOfObjectsVsHeapObjectIteratorPrecision) {
// are correct.
CcTest::heap()->DisableInlineAllocation();
HeapObjectIterator iterator(CcTest::heap());
PtrComprCageBase cage_base(CcTest::i_isolate());
intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects();
intptr_t size_of_objects_2 = 0;
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
if (!obj.IsFreeSpace()) {
size_of_objects_2 += obj.Size();
if (!obj.IsFreeSpace(cage_base)) {
size_of_objects_2 += obj.Size(cage_base);
}
}
// Delta must be within 5% of the larger result.
......
......@@ -214,7 +214,7 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnConstruction) {
v8::Local<v8::Object> local =
v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state;
MarkCompactCollector::MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
auto ref =
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
......@@ -234,7 +234,7 @@ TEST_F(TracedReferenceTest, WriteBarrierOnHeapReset) {
v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
auto ref = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state;
MarkCompactCollector::MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
ref->Reset(v8_isolate(), local);
EXPECT_TRUE(state.IsGrey(HeapObject::cast(*Utils::OpenHandle(*local))));
......@@ -254,7 +254,7 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnStackReset) {
v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
v8::TracedReference<v8::Object> ref;
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state;
MarkCompactCollector::MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
ref.Reset(v8_isolate(), local);
EXPECT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
......@@ -274,7 +274,7 @@ TEST_F(TracedReferenceTest, WriteBarrierOnHeapCopy) {
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
auto ref_to = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state;
MarkCompactCollector::MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
*ref_to = *ref_from;
EXPECT_TRUE(!ref_from->IsEmpty());
......@@ -297,7 +297,7 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnStackCopy) {
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
v8::TracedReference<v8::Object> ref_to;
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state;
MarkCompactCollector::MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
ref_to = *ref_from;
EXPECT_TRUE(!ref_from->IsEmpty());
......@@ -318,7 +318,7 @@ TEST_F(TracedReferenceTest, WriteBarrierOnMove) {
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
auto ref_to = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state;
MarkCompactCollector::MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
*ref_to = std::move(*ref_from);
ASSERT_TRUE(ref_from->IsEmpty());
......@@ -341,7 +341,7 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnStackMove) {
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
v8::TracedReference<v8::Object> ref_to;
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state;
MarkCompactCollector::MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
ref_to = std::move(*ref_from);
ASSERT_TRUE(ref_from->IsEmpty());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment