Commit 6b5b3a5a authored by Hannes Payer, committed by Commit Bot

[heap] Promote surviving young generation large objects in the Scavenger.

Surviving large objects are promoted directly to the old generation: instead of being copied, their pages are re-registered with the old-generation large object space once the scavenge finishes.
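
Rough standalone sketch of the new flow, for orientation only. The types and
names below (LargePageStub, SpaceId, SurvivorMap, ...) are simplified
stand-ins invented for this illustration, not V8's real classes:

#include <cstddef>
#include <unordered_map>
#include <vector>

enum class SpaceId { kNewLargeObjectSpace, kOldLargeObjectSpace };

struct LargePageStub {
  SpaceId owner = SpaceId::kNewLargeObjectSpace;
  bool in_from_space = false;
  std::size_t object_size = 0;
};

// Stand-in for the per-scavenger record of surviving young-generation large
// objects; V8 keys on HeapObject* and stores the original Map*.
using SurvivorMap = std::unordered_map<LargePageStub*, std::size_t>;

// Before scavenging, every young-generation large page is flagged as
// from-space so survivors can be told apart afterwards
// (NewLargeObjectSpace::Flip in this CL).
void Flip(std::vector<LargePageStub*>& new_lo_pages) {
  for (LargePageStub* page : new_lo_pages) page->in_from_space = true;
}

// During the scavenge a surviving large object is only recorded; re-owning
// its page is deferred to finalization (cf. Scavenger::HandleLargeObject).
void RecordSurvivor(SurvivorMap& survivors, LargePageStub* page) {
  survivors.insert({page, page->object_size});
}

// Finalization: promote each surviving page by handing it over to the
// old-generation large object space instead of copying the object
// (cf. ScavengerCollector::HandleSurvivingNewLargeObjects).
void PromoteSurvivors(SurvivorMap& survivors) {
  for (auto& entry : survivors) {
    LargePageStub* page = entry.first;
    page->in_from_space = false;
    page->owner = SpaceId::kOldLargeObjectSpace;
  }
  survivors.clear();
}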

Bug: chromium:852420
Change-Id: I460649714544d4338e01085f487d4b70065ecfb5
Reviewed-on: https://chromium-review.googlesource.com/1238173
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56237}
parent 52a9e67a
......@@ -410,7 +410,8 @@
F(SCAVENGER_SCAVENGE_PARALLEL) \
F(SCAVENGER_SCAVENGE_ROOTS) \
F(SCAVENGER_SCAVENGE_UPDATE_REFS) \
F(SCAVENGER_SCAVENGE_WEAK)
F(SCAVENGER_SCAVENGE_WEAK) \
F(SCAVENGER_SCAVENGE_FINALIZE)
#define TRACER_BACKGROUND_SCOPES(F) \
F(BACKGROUND_ARRAY_BUFFER_FREE) \
......
......@@ -1918,6 +1918,10 @@ void Heap::Scavenge() {
new_space()->Flip();
new_space()->ResetLinearAllocationArea();
// We also flip the young generation large object space. All large objects
// will be in the from space.
new_lo_space()->Flip();
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
......
......@@ -15,6 +15,81 @@
namespace v8 {
namespace internal {
void Scavenger::PromotionList::View::PushRegularObject(HeapObject* object,
int size) {
promotion_list_->PushRegularObject(task_id_, object, size);
}
void Scavenger::PromotionList::View::PushLargeObject(HeapObject* object,
Map* map, int size) {
promotion_list_->PushLargeObject(task_id_, object, map, size);
}
bool Scavenger::PromotionList::View::IsEmpty() {
return promotion_list_->IsEmpty();
}
size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
return promotion_list_->LocalPushSegmentSize(task_id_);
}
bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
return promotion_list_->Pop(task_id_, entry);
}
bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
return promotion_list_->IsGlobalPoolEmpty();
}
bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
}
void Scavenger::PromotionList::PushRegularObject(int task_id,
HeapObject* object, int size) {
regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
}
void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject* object,
Map* map, int size) {
large_object_promotion_list_.Push(task_id, {object, map, size});
}
bool Scavenger::PromotionList::IsEmpty() {
return regular_object_promotion_list_.IsEmpty() &&
large_object_promotion_list_.IsEmpty();
}
size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
large_object_promotion_list_.LocalPushSegmentSize(task_id);
}
bool Scavenger::PromotionList::Pop(int task_id,
struct PromotionListEntry* entry) {
ObjectAndSize regular_object;
if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
entry->heap_object = regular_object.first;
entry->size = regular_object.second;
entry->map = entry->heap_object->map();
return true;
}
return large_object_promotion_list_.Pop(task_id, entry);
}
bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
large_object_promotion_list_.IsGlobalPoolEmpty();
}
bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
// Threshold at which to prioritize processing of the promotion list. Right
// now we only look into the regular object list.
const int kProcessPromotionListThreshold =
kRegularObjectPromotionListSegmentSize / 2;
return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
}
// White list for objects that are guaranteed to contain only data.
bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
switch (visitor_id) {
......@@ -128,7 +203,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map* map,
}
HeapObjectReference::Update(slot, target);
if (!ContainsOnlyData(map->visitor_id())) {
promotion_list_.Push(ObjectAndSize(target, object_size));
promotion_list_.PushRegularObject(target, object_size);
}
promoted_size_ += object_size;
return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
......@@ -143,6 +218,26 @@ SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
: REMOVE_SLOT;
}
bool Scavenger::HandleLargeObject(Map* map, HeapObject* object,
int object_size) {
if (V8_UNLIKELY(FLAG_young_generation_large_objects &&
object_size > kMaxNewSpaceHeapObjectSize)) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner()->identity());
if (base::AsAtomicPointer::Release_CompareAndSwap(
reinterpret_cast<HeapObject**>(object->address()), map,
MapWord::FromForwardingAddress(object).ToMap()) == map) {
surviving_new_large_objects_.insert({object, map});
if (!ContainsOnlyData(map->visitor_id())) {
promotion_list_.PushLargeObject(object, map, object_size);
}
}
return true;
}
return false;
}
SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
HeapObjectReference** slot,
HeapObject* object,
......@@ -151,6 +246,10 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
SLOW_DCHECK(object->SizeFromMap(map) == object_size);
CopyAndForwardResult result;
if (HandleLargeObject(map, object, object_size)) {
return REMOVE_SLOT;
}
if (!heap()->ShouldBePromoted(object->address())) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
......
......@@ -166,8 +166,8 @@ void ScavengerCollector::CollectGarbage() {
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i] =
new Scavenger(heap_, is_logging, &copied_list, &promotion_list, i);
scavengers[i] = new Scavenger(this, heap_, is_logging, &copied_list,
&promotion_list, i);
job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
}
......@@ -228,9 +228,16 @@ void ScavengerCollector::CollectGarbage() {
&root_scavenge_visitor, &IsUnscavengedHeapObject);
}
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i]->Finalize();
delete scavengers[i];
{
// Finalize parallel scavenging.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i]->Finalize();
delete scavengers[i];
}
HandleSurvivingNewLargeObjects();
}
}
......@@ -280,6 +287,29 @@ void ScavengerCollector::CollectGarbage() {
heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
for (SurvivingNewLargeObjectMapEntry update_info :
surviving_new_large_objects_) {
HeapObject* object = update_info.first;
Map* map = update_info.second;
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
object->set_map_word(MapWord::FromMap(map));
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
DCHECK(heap_->new_lo_space()->IsEmpty());
}
void ScavengerCollector::MergeSurvivingNewLargeObjects(
const SurvivingNewLargeObjectsMap& objects) {
for (SurvivingNewLargeObjectMapEntry object : objects) {
bool success = surviving_new_large_objects_.insert(object).second;
USE(success);
DCHECK(success);
}
}
int ScavengerCollector::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
......@@ -295,9 +325,11 @@ int ScavengerCollector::NumberOfScavengeTasks() {
return tasks;
}
Scavenger::Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
PromotionList* promotion_list, int task_id)
: heap_(heap),
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
CopiedList* copied_list, PromotionList* promotion_list,
int task_id)
: collector_(collector),
heap_(heap),
promotion_list_(promotion_list, task_id),
copied_list_(copied_list, task_id),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
......@@ -308,7 +340,8 @@ Scavenger::Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
is_compacting_(heap->incremental_marking()->IsCompacting()) {}
void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, Map* map,
int size) {
// We are not collecting slots on new space objects during mutation thus we
// have to scan for pointers to evacuation candidates when we promote
// objects. But we should not record any slots in non-black objects. Grey
......@@ -319,7 +352,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
is_compacting_ &&
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
target->IterateBodyFast(target->map(), size, &visitor);
target->IterateBodyFast(map, size, &visitor);
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
......@@ -352,9 +385,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
void Scavenger::Process(OneshotBarrier* barrier) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::Process");
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
ScavengeVisitor scavenge_visitor(this);
const bool have_barrier = barrier != nullptr;
......@@ -363,8 +393,7 @@ void Scavenger::Process(OneshotBarrier* barrier) {
do {
done = true;
ObjectAndSize object_and_size;
while ((promotion_list_.LocalPushSegmentSize() <
kProcessPromotionListThreshold) &&
while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
copied_list_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
......@@ -375,11 +404,11 @@ void Scavenger::Process(OneshotBarrier* barrier) {
}
}
while (promotion_list_.Pop(&object_and_size)) {
HeapObject* target = object_and_size.first;
int size = object_and_size.second;
struct PromotionListEntry entry;
while (promotion_list_.Pop(&entry)) {
HeapObject* target = entry.heap_object;
DCHECK(!target->IsMap());
IterateAndScavengePromotedObject(target, size);
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
if (!promotion_list_.IsGlobalPoolEmpty()) {
......@@ -394,6 +423,7 @@ void Scavenger::Finalize() {
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
}
......
......@@ -22,6 +22,10 @@ enum class CopyAndForwardResult {
FAILURE
};
using ObjectAndSize = std::pair<HeapObject*, int>;
using SurvivingNewLargeObjectsMap = std::unordered_map<HeapObject*, Map*>;
using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject*, Map*>;
class ScavengerCollector {
public:
static const int kMaxScavengerTasks = 8;
......@@ -31,24 +35,82 @@ class ScavengerCollector {
void CollectGarbage();
private:
void MergeSurvivingNewLargeObjects(
const SurvivingNewLargeObjectsMap& objects);
int NumberOfScavengeTasks();
void HandleSurvivingNewLargeObjects();
Isolate* const isolate_;
Heap* const heap_;
base::Semaphore parallel_scavenge_semaphore_;
SurvivingNewLargeObjectsMap surviving_new_large_objects_;
friend class Scavenger;
};
class Scavenger {
public:
struct PromotionListEntry {
HeapObject* heap_object;
Map* map;
int size;
};
class PromotionList {
public:
class View {
public:
View(PromotionList* promotion_list, int task_id)
: promotion_list_(promotion_list), task_id_(task_id) {}
inline void PushRegularObject(HeapObject* object, int size);
inline void PushLargeObject(HeapObject* object, Map* map, int size);
inline bool IsEmpty();
inline size_t LocalPushSegmentSize();
inline bool Pop(struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList();
private:
PromotionList* promotion_list_;
int task_id_;
};
explicit PromotionList(int num_tasks)
: regular_object_promotion_list_(num_tasks),
large_object_promotion_list_(num_tasks) {}
inline void PushRegularObject(int task_id, HeapObject* object, int size);
inline void PushLargeObject(int task_id, HeapObject* object, Map* map,
int size);
inline bool IsEmpty();
inline size_t LocalPushSegmentSize(int task_id);
inline bool Pop(int task_id, struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList(int task_id);
private:
static const int kRegularObjectPromotionListSegmentSize = 256;
static const int kLargeObjectPromotionListSegmentSize = 4;
using RegularObjectPromotionList =
Worklist<ObjectAndSize, kRegularObjectPromotionListSegmentSize>;
using LargeObjectPromotionList =
Worklist<PromotionListEntry, kLargeObjectPromotionListSegmentSize>;
RegularObjectPromotionList regular_object_promotion_list_;
LargeObjectPromotionList large_object_promotion_list_;
};
static const int kCopiedListSegmentSize = 256;
static const int kPromotionListSegmentSize = 256;
using ObjectAndSize = std::pair<HeapObject*, int>;
using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
PromotionList* promotion_list, int task_id);
Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
CopiedList* copied_list, PromotionList* promotion_list,
int task_id);
// Entry point for scavenging an old generation page. For scavenging single
// objects see RootScavengingVisitor and ScavengeVisitor below.
......@@ -106,6 +168,9 @@ class Scavenger {
V8_INLINE SlotCallbackResult EvacuateObject(HeapObjectReference** slot,
Map* map, HeapObject* source);
V8_INLINE bool HandleLargeObject(Map* map, HeapObject* object,
int object_size);
// Different cases for object evacuation.
V8_INLINE SlotCallbackResult EvacuateObjectDefault(Map* map,
HeapObjectReference** slot,
......@@ -121,10 +186,11 @@ class Scavenger {
ConsString* object,
int object_size);
void IterateAndScavengePromotedObject(HeapObject* target, int size);
void IterateAndScavengePromotedObject(HeapObject* target, Map* map, int size);
static inline bool ContainsOnlyData(VisitorId visitor_id);
ScavengerCollector* const collector_;
Heap* const heap_;
PromotionList::View promotion_list_;
CopiedList::View copied_list_;
......@@ -132,6 +198,7 @@ class Scavenger {
size_t copied_size_;
size_t promoted_size_;
LocalAllocator allocator_;
SurvivingNewLargeObjectsMap surviving_new_large_objects_;
const bool is_logging_;
const bool is_incremental_marking_;
const bool is_compacting_;
......
......@@ -3310,13 +3310,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
objects_size_ += object_size;
page_count_++;
memory_chunk_list_.PushBack(page);
InsertChunkMapEntries(page);
Register(page, object_size);
HeapObject* object = page->GetObject();
......@@ -3409,6 +3403,39 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
}
}
void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
DCHECK(page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
DCHECK(!page->IsFlagSet(MemoryChunk::IN_TO_SPACE));
size_t object_size = static_cast<size_t>(page->GetObject()->Size());
reinterpret_cast<NewLargeObjectSpace*>(page->owner())
->Unregister(page, object_size);
Register(page, object_size);
page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->set_owner(this);
}
void LargeObjectSpace::Register(LargePage* page, size_t object_size) {
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
objects_size_ += object_size;
page_count_++;
memory_chunk_list_.PushBack(page);
InsertChunkMapEntries(page);
}
void LargeObjectSpace::Unregister(LargePage* page, size_t object_size) {
size_ -= static_cast<int>(page->size());
AccountUncommitted(page->size());
objects_size_ -= object_size;
page_count_--;
memory_chunk_list_.Remove(page);
RemoveChunkMapEntries(page);
}
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
......@@ -3596,5 +3623,13 @@ size_t NewLargeObjectSpace::Available() {
// TODO(hpayer): Update as soon as we have a growing strategy.
return 0;
}
void NewLargeObjectSpace::Flip() {
for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
}
}
} // namespace internal
} // namespace v8
......@@ -902,6 +902,16 @@ class ReadOnlyPage : public Page {
class LargePage : public MemoryChunk {
public:
// A limit to guarantee that we do not overflow typed slot offset in
// the old to old remembered set.
// Note that this limit is higher than what assembler already imposes on
// x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
static LargePage* FromHeapObject(const HeapObject* o) {
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
inline LargePage* next_page() {
......@@ -914,12 +924,6 @@ class LargePage : public MemoryChunk {
void ClearOutOfLiveRangeSlots(Address free_start);
// A limit to guarantee that we do not overflow typed slot offset in
// the old to old remembered set.
// Note that this limit is higher than what assembler already imposes on
// x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
private:
static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable);
......@@ -3007,6 +3011,8 @@ class LargeObjectSpace : public Space {
void RemoveChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page, Address free_start);
void PromoteNewLargeObject(LargePage* page);
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject* obj);
// Checks whether an address is in the object area in this space. Iterates
......@@ -3016,6 +3022,9 @@ class LargeObjectSpace : public Space {
// Checks whether the space is empty.
bool IsEmpty() { return first_page() == nullptr; }
void Register(LargePage* page, size_t object_size);
void Unregister(LargePage* page, size_t object_size);
LargePage* first_page() {
return reinterpret_cast<LargePage*>(Space::first_page());
}
......@@ -3064,6 +3073,8 @@ class NewLargeObjectSpace : public LargeObjectSpace {
// Available bytes for objects in this space.
size_t Available() override;
void Flip();
};
class LargeObjectIterator : public ObjectIterator {
......
......@@ -5675,6 +5675,7 @@ TEST(Regress618958) {
}
TEST(YoungGenerationLargeObjectAllocation) {
if (FLAG_minor_mc) return;
FLAG_young_generation_large_objects = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
......@@ -5683,13 +5684,26 @@ TEST(YoungGenerationLargeObjectAllocation) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
CHECK(chunk->owner()->identity() == LO_SPACE);
CHECK_EQ(LO_SPACE, chunk->owner()->identity());
CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
chunk = MemoryChunk::FromAddress(array_small->address());
CHECK(chunk->owner()->identity() == NEW_LO_SPACE);
CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
Handle<Object> number = isolate->factory()->NewHeapNumber(123.456);
array_small->set(0, *number);
CcTest::CollectGarbage(NEW_SPACE);
// After the first young generation GC array_small will be in the old
// generation large object space.
chunk = MemoryChunk::FromAddress(array_small->address());
CHECK_EQ(LO_SPACE, chunk->owner()->identity());
CHECK(!chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
CcTest::CollectAllAvailableGarbage();
}
TEST(UncommitUnusedLargeObjectMemory) {
......