Commit fff5ed12 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Compact map space with --compact-map-space

Enable compaction of objects in the map space during a full GC. Until
now, pages in the map space were never chosen as evacuation candidates.
We might be able to improve memory usage a bit by also compacting the
map space. Luckily for us, the marking barrier was already emitted when
updating an object's map word.

This CL adds a new flag, FLAG_compact_map_space, to easily turn this
feature off again. For now we keep this flag (and with it map space
compaction) disabled by default, so GC behavior does not change with
this CL.

Bug: v8:12578
Change-Id: I99c0cd826bd824af5383fb3ce64796693a59d1ff
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3404775
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78844}
parent e4311846
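
For context, a minimal embedder-side sketch of how this could be exercised once the flag exists (hypothetical snippet, not part of this CL; only the --compact-map-space and --compact-on-every-full-gc flag names come from the change itself, the surrounding setup is the usual V8 embedder boilerplate):

// Hypothetical sketch: opt into map space compaction before initializing V8.
// Flags must be set before the first isolate is created.
#include <memory>

#include "include/libplatform/libplatform.h"
#include "include/v8.h"

int main() {
  v8::V8::SetFlagsFromString("--compact-map-space");
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();
  // ... create an isolate and context, run code; full GCs may now also
  // compact pages in the map space ...
  v8::V8::Dispose();
  return 0;
}

The same behavior should be reachable from the d8 command line by passing --compact-map-space (optionally together with --compact-on-every-full-gc to force compaction on every full GC).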
@@ -1318,6 +1318,8 @@ DEFINE_BOOL(compact, true,
"Perform compaction on full GCs based on V8's default heuristics")
DEFINE_BOOL(compact_code_space, true,
"Perform code space compaction on full collections.")
DEFINE_BOOL(compact_map_space, false,
"Perform map space compaction on full collections.")
DEFINE_BOOL(compact_on_every_full_gc, false,
"Perform compaction on every full GC")
DEFINE_BOOL(compact_with_stack, true,
......
@@ -88,6 +88,7 @@
#include "src/objects/feedback-vector.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
@@ -7053,6 +7054,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
case CODE_SPACE:
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
return dst == MAP_SPACE && type == MAP_TYPE;
case LO_SPACE:
case CODE_LO_SPACE:
case NEW_LO_SPACE:
......
@@ -5,8 +5,8 @@
#ifndef V8_HEAP_LOCAL_ALLOCATOR_INL_H_
#define V8_HEAP_LOCAL_ALLOCATOR_INL_H_
#include "src/common/globals.h"
#include "src/heap/local-allocator.h"
#include "src/heap/spaces-inl.h"
namespace v8 {
@@ -22,6 +22,9 @@ AllocationResult EvacuationAllocator::Allocate(AllocationSpace space,
case OLD_SPACE:
return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
alignment, origin);
case MAP_SPACE:
return compaction_spaces_.Get(MAP_SPACE)->AllocateRaw(object_size,
alignment, origin);
case CODE_SPACE:
return compaction_spaces_.Get(CODE_SPACE)
->AllocateRaw(object_size, alignment, origin);
@@ -39,6 +42,9 @@ void EvacuationAllocator::FreeLast(AllocationSpace space, HeapObject object,
case OLD_SPACE:
FreeLastInOldSpace(object, object_size);
return;
case MAP_SPACE:
FreeLastInMapSpace(object, object_size);
return;
default:
// Only new and old space supported.
UNREACHABLE();
@@ -64,6 +70,16 @@ void EvacuationAllocator::FreeLastInOldSpace(HeapObject object,
}
}
void EvacuationAllocator::FreeLastInMapSpace(HeapObject object,
int object_size) {
if (!compaction_spaces_.Get(MAP_SPACE)->TryFreeLast(object.address(),
object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object.address(), object_size,
ClearRecordedSlots::kNo);
}
}
AllocationResult EvacuationAllocator::AllocateInLAB(
int object_size, AllocationAlignment alignment) {
AllocationResult allocation;
......
@@ -35,6 +35,8 @@ class EvacuationAllocator {
heap_->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap_->code_space()->MergeCompactionSpace(
compaction_spaces_.Get(CODE_SPACE));
heap_->map_space()->MergeCompactionSpace(compaction_spaces_.Get(MAP_SPACE));
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
@@ -56,6 +58,7 @@
AllocationAlignment alignment);
inline void FreeLastInNewSpace(HeapObject object, int object_size);
inline void FreeLastInOldSpace(HeapObject object, int object_size);
inline void FreeLastInMapSpace(HeapObject object, int object_size);
Heap* const heap_;
NewSpace* const new_space_;
......
@@ -199,10 +199,9 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
Object map_object = black_object.map(cage_base, kAcquireLoad);
CHECK(map_object.IsMap(cage_base));
map = Map::cast(map_object);
DCHECK(map.IsMap(cage_base));
map = black_object.map(cage_base, kAcquireLoad);
// Map might be forwarded during GC.
DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map));
size = black_object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
......
@@ -53,6 +53,7 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/objects.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
@@ -509,6 +510,17 @@ void MarkCompactCollector::TearDown() {
sweeper()->TearDown();
}
// static
bool MarkCompactCollector::IsMapOrForwardedMap(Map map) {
MapWord map_word = map.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress().IsMap();
} else {
return map_word.ToMap().IsMap();
}
}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
@@ -545,6 +557,10 @@ bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
CollectEvacuationCandidates(heap()->old_space());
if (FLAG_compact_map_space) {
CollectEvacuationCandidates(heap()->map_space());
}
if (FLAG_compact_code_space &&
(heap()->IsGCWithoutStack() || FLAG_compact_code_space_with_stack)) {
CollectEvacuationCandidates(heap()->code_space());
@@ -741,7 +757,8 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
}
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
space->identity() == MAP_SPACE);
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
@@ -1362,6 +1379,10 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
p.address());
}
inline void VisitMapPointer(HeapObject host) final {
VisitPointer(host, host.map_slot());
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
RecordMigratedSlot(host, p.load(cage_base()), p.address());
@@ -1535,10 +1556,19 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
// In case the object's map gets relocated during GC we load the old map
// here. This is fine since they store the same content.
dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
} else if (dest == MAP_SPACE) {
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kTaggedSize));
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
@@ -1546,7 +1576,9 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
code.Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
// In case the object's map gets relocated during GC we load the old map
// here. This is fine since they store the same content.
dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1786,7 +1818,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
} else if (mode == NEW_TO_OLD) {
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
object.IterateBodyFast(cage_base, record_visitor_);
object.IterateFast(cage_base, record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
record_visitor_->MarkArrayBufferExtensionPromoted(object);
}
@@ -3122,14 +3154,17 @@ static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
map_word.ToForwardingAddress(host_cage_base));
if (access_mode == AccessMode::NON_ATOMIC) {
slot.store(target);
// Needs to be atomic for map space compaction: This slot could be a map
// word which we update while loading the map word for updating the slot
// on another page.
slot.Relaxed_Store(target);
} else {
slot.Release_CompareAndSwap(old, target);
}
DCHECK(!Heap::InFromPage(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else {
DCHECK(heap_obj.map(cage_base).IsMap(cage_base));
DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map_word.ToMap()));
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
......
@@ -511,6 +511,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
uint32_t offset;
};
static bool IsMapOrForwardedMap(Map map);
static bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target);
static RecordRelocSlotInfo ProcessRelocInfo(Code host, RelocInfo* rinfo,
......
@@ -166,10 +166,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// ObjectVisitor overrides.
void VisitMapPointer(HeapObject host) final {
// Note that we are skipping the recording the slot because map objects
// can't move, so this is safe (see ProcessStrongHeapObject for comparison)
MarkObject(host, HeapObject::cast(
host.map(ObjectVisitorWithCageBases::cage_base())));
Map map = host.map(ObjectVisitorWithCageBases::cage_base());
MarkObject(host, map);
concrete_visitor()->RecordSlot(host, host.map_slot(), map);
}
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
......
@@ -487,6 +487,8 @@ class CompactionSpaceCollection : public Malloced {
CompactionSpaceKind compaction_space_kind)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
compaction_space_kind),
map_space_(heap, MAP_SPACE, Executability::NOT_EXECUTABLE,
compaction_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
compaction_space_kind) {}
@@ -494,6 +496,8 @@
switch (space) {
case OLD_SPACE:
return &old_space_;
case MAP_SPACE:
return &map_space_;
case CODE_SPACE:
return &code_space_;
default:
@@ -504,6 +508,7 @@
private:
CompactionSpace old_space_;
CompactionSpace map_space_;
CompactionSpace code_space_;
};
......
@@ -387,7 +387,8 @@ int Sweeper::RawSweep(
&old_to_new_cleanup);
}
Map map = object.map(cage_base, kAcquireLoad);
DCHECK(map.IsMap(cage_base));
// Map might be forwarded during GC.
DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map));
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
......
@@ -126,6 +126,9 @@ class HeapObject : public Object {
template <typename ObjectVisitor>
inline void IterateFast(PtrComprCageBase cage_base, ObjectVisitor* v);
template <typename ObjectVisitor>
inline void IterateFast(Map map, int object_size, ObjectVisitor* v);
// Iterates over all pointers contained in the object except the
// first map pointer. The object type is given in the first
// parameter. This function does not access the map pointer in the
......
@@ -1301,6 +1301,12 @@ void HeapObject::IterateFast(PtrComprCageBase cage_base, ObjectVisitor* v) {
IterateBodyFast(cage_base, v);
}
template <typename ObjectVisitor>
void HeapObject::IterateFast(Map map, int object_size, ObjectVisitor* v) {
v->VisitMapPointer(*this);
IterateBodyFast(map, object_size, v);
}
template <typename ObjectVisitor>
void HeapObject::IterateBodyFast(PtrComprCageBase cage_base, ObjectVisitor* v) {
Map m = map(cage_base);
......
@@ -801,9 +801,7 @@ void HeapObject::set_map(Map value) {
set_map_word(MapWord::FromMap(value), kRelaxedStore);
#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
WriteBarrier::Marking(*this, ObjectSlot(kNullAddress), value);
WriteBarrier::Marking(*this, map_slot(), value);
}
#endif
}
@@ -821,9 +819,7 @@ void HeapObject::set_map(Map value, ReleaseStoreTag tag) {
set_map_word(MapWord::FromMap(value), tag);
#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
WriteBarrier::Marking(*this, ObjectSlot(kNullAddress), value);
WriteBarrier::Marking(*this, map_slot(), value);
}
#endif
}
@@ -855,9 +851,7 @@ void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
#ifndef V8_DISABLE_WRITE_BARRIERS
if (mode != SKIP_WRITE_BARRIER) {
DCHECK(!value.is_null());
// TODO(1600) We are passing kNullAddress as a slot because maps can never
// be on an evacuation candidate.
WriteBarrier::Marking(*this, ObjectSlot(kNullAddress), value);
WriteBarrier::Marking(*this, map_slot(), value);
} else {
SLOW_DCHECK(!WriteBarrier::IsRequired(*this, value));
}
......