Commit 234d4307 authored by Michael Lippautz, committed by Commit Bot

[heap] Refactor and simplify pretenuring infrastructure
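
Replace the templatized (kGlobal/kCached) UpdateAllocationSite with a
single cached variant: all collectors now record feedback in a local
PretenuringFeedbackMap (std::unordered_map<AllocationSite*, size_t>) and
merge it into the global map after evacuation. This removes the
base::HashMap reinterpret_cast/bit_cast plumbing and the PretenuringScope
helper; global_pretenuring_feedback_ becomes a plain member that is
cleared at the end of ProcessPretenuringFeedback().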

Bug: 
Change-Id: I81132af45d8fb649d4239fa0e0ef75b95e148208
Reviewed-on: https://chromium-review.googlesource.com/633604
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47606}
parent 08bfcb29
@@ -518,9 +518,9 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
   UNREACHABLE();
 }
 
-template <Heap::UpdateAllocationSiteMode mode>
 void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
-                                base::HashMap* pretenuring_feedback) {
+                                PretenuringFeedbackMap* pretenuring_feedback) {
+  DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
   DCHECK(InFromSpace(object) ||
          (InToSpace(object) &&
           Page::FromAddress(object->address())

@@ -535,37 +535,16 @@ void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
       FindAllocationMemento<kForGC>(map, object);
   if (memento_candidate == nullptr) return;
 
-  if (mode == kGlobal) {
-    DCHECK_EQ(pretenuring_feedback, global_pretenuring_feedback_);
-    // Entering global pretenuring feedback is only used in the scavenger, where
-    // we are allowed to actually touch the allocation site.
-    if (!memento_candidate->IsValid()) return;
-    AllocationSite* site = memento_candidate->GetAllocationSite();
-    DCHECK(!site->IsZombie());
-    // For inserting in the global pretenuring storage we need to first
-    // increment the memento found count on the allocation site.
-    if (site->IncrementMementoFoundCount()) {
-      global_pretenuring_feedback_->LookupOrInsert(site,
-                                                   ObjectHash(site->address()));
-    }
-  } else {
-    DCHECK_EQ(mode, kCached);
-    DCHECK_NE(pretenuring_feedback, global_pretenuring_feedback_);
-    // Entering cached feedback is used in the parallel case. We are not allowed
-    // to dereference the allocation site and rather have to postpone all checks
-    // till actually merging the data.
-    Address key = memento_candidate->GetAllocationSiteUnchecked();
-    base::HashMap::Entry* e =
-        pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
-    DCHECK(e != nullptr);
-    (*bit_cast<intptr_t*>(&e->value))++;
-  }
+  // Entering cached feedback is used in the parallel case. We are not allowed
+  // to dereference the allocation site and rather have to postpone all checks
+  // till actually merging the data.
+  Address key = memento_candidate->GetAllocationSiteUnchecked();
+  (*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
 }
 
 void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
-  global_pretenuring_feedback_->Remove(
-      site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
+  global_pretenuring_feedback_.erase(site);
 }
 
 Isolate* Heap::isolate() {
...
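As an aside, the cached-feedback pattern that the new Heap::UpdateAllocationSite above follows can be sketched in isolation. This is a minimal sketch, not V8 code: RecordFeedback and the bare AllocationSite forward declaration are invented for illustration.

// Minimal sketch of the cached pretenuring-feedback hot path (invented names).
#include <cstddef>
#include <unordered_map>

struct AllocationSite;  // opaque key: the hot path never dereferences the site
using PretenuringFeedbackMap =
    std::unordered_map<AllocationSite*, std::size_t>;

// During scavenging/evacuation each thread bumps a per-site counter in its
// local map. operator[] value-initializes a missing count to 0, replacing
// the old base::HashMap LookupOrInsert plus bit_cast'd intptr_t increment.
inline void RecordFeedback(PretenuringFeedbackMap* local_feedback,
                           AllocationSite* unchecked_site) {
  (*local_feedback)[unchecked_site]++;
}
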
@@ -164,7 +164,7 @@ Heap::Heap()
       new_space_allocation_counter_(0),
       old_generation_allocation_counter_at_last_gc_(0),
       old_generation_size_at_last_gc_(0),
-      global_pretenuring_feedback_(nullptr),
+      global_pretenuring_feedback_(kInitialFeedbackCapacity),
       is_marking_flag_(false),
       ring_buffer_full_(false),
       ring_buffer_end_(0),

@@ -603,13 +603,11 @@ void Heap::RepairFreeListsAfterDeserialization() {
 }
 
 void Heap::MergeAllocationSitePretenuringFeedback(
-    const base::HashMap& local_pretenuring_feedback) {
+    const PretenuringFeedbackMap& local_pretenuring_feedback) {
   AllocationSite* site = nullptr;
-  for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
-       local_entry != nullptr;
-       local_entry = local_pretenuring_feedback.Next(local_entry)) {
-    site = reinterpret_cast<AllocationSite*>(local_entry->key);
-    MapWord map_word = site->map_word();
+  for (auto& site_and_count : local_pretenuring_feedback) {
+    site = site_and_count.first;
+    MapWord map_word = site_and_count.first->map_word();
     if (map_word.IsForwardingAddress()) {
       site = AllocationSite::cast(map_word.ToForwardingAddress());
     }

@@ -619,13 +617,11 @@ void Heap::MergeAllocationSitePretenuringFeedback(
     // This is an inlined check of AllocationMemento::IsValid.
     if (!site->IsAllocationSite() || site->IsZombie()) continue;
 
-    int value =
-        static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
-    DCHECK_GT(value, 0);
+    const int value = static_cast<int>(site_and_count.second);
+    DCHECK_LT(0, value);
 
     if (site->IncrementMementoFoundCount(value)) {
-      global_pretenuring_feedback_->LookupOrInsert(site,
-                                                   ObjectHash(site->address()));
+      // For sites in the global map the count is accessed through the site.
+      global_pretenuring_feedback_.insert(std::make_pair(site, 0));
     }
   }
 }
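The merge direction can be sketched the same way. Again a hedged sketch with invented names: FakeSite stands in for AllocationSite, its IncrementMementoFoundCount semantics are simplified, and the forwarding-pointer and zombie handling from the real code above is elided.

// Minimal sketch of the single-threaded merge (invented names and semantics).
#include <cstddef>
#include <unordered_map>
#include <utility>

struct FakeSite {
  int memento_found_count = 0;
  // The real AllocationSite decides whether the site should be tracked
  // globally; this stand-in just accumulates and always says yes.
  bool IncrementMementoFoundCount(int value) {
    memento_found_count += value;
    return true;
  }
};
using FeedbackMap = std::unordered_map<FakeSite*, std::size_t>;

// Counts migrate from the local map into the sites themselves; the global
// map only remembers which sites received feedback, so its mapped value is
// kept at 0 (cf. the DCHECK_EQ(0, site_and_count.second) further below).
void MergeFeedback(FeedbackMap* global_feedback,
                   const FeedbackMap& local_feedback) {
  for (const auto& site_and_count : local_feedback) {
    FakeSite* site = site_and_count.first;
    const int value = static_cast<int>(site_and_count.second);
    if (site->IncrementMementoFoundCount(value)) {
      global_feedback->insert(std::make_pair(site, std::size_t{0}));
    }
  }
}
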
@@ -647,22 +643,6 @@ class Heap::SkipStoreBufferScope {
   StoreBuffer* store_buffer_;
 };
 
-class Heap::PretenuringScope {
- public:
-  explicit PretenuringScope(Heap* heap) : heap_(heap) {
-    heap_->global_pretenuring_feedback_ =
-        new base::HashMap(kInitialFeedbackCapacity);
-  }
-
-  ~PretenuringScope() {
-    delete heap_->global_pretenuring_feedback_;
-    heap_->global_pretenuring_feedback_ = nullptr;
-  }
-
- private:
-  Heap* heap_;
-};
-
 namespace {
 
 inline bool MakePretenureDecision(
     AllocationSite* site, AllocationSite::PretenureDecision current_decision,
@@ -736,10 +716,11 @@ void Heap::ProcessPretenuringFeedback() {
     // Step 1: Digest feedback for recorded allocation sites.
     bool maximum_size_scavenge = MaximumSizeScavenge();
-    for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
-         e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
+    for (auto& site_and_count : global_pretenuring_feedback_) {
       allocation_sites++;
-      site = reinterpret_cast<AllocationSite*>(e->key);
+      site = site_and_count.first;
+      // Count is always accessed through the site.
+      DCHECK_EQ(0, site_and_count.second);
       int found_count = site->memento_found_count();
       // An entry in the storage does not imply that the count is > 0 because
       // allocation sites might have been reset due to too many objects dying
@@ -790,6 +771,9 @@ void Heap::ProcessPretenuringFeedback() {
                  active_allocation_sites, allocation_mementos_found,
                  tenure_decisions, dont_tenure_decisions);
     }
+
+    global_pretenuring_feedback_.clear();
+    global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
   }
 }
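With the map as a plain value member, the delete/new dance that PretenuringScope performed (deleted above, with its call site removed just below) collapses into clear() plus reserve(). A rough sketch, with HeapLike, the member name, and the capacity value all invented:

// Sketch of the ownership simplification (invented class and constant).
#include <cstddef>
#include <unordered_map>

struct AllocationSite;

class HeapLike {
 public:
  void ProcessPretenuringFeedback() {
    // ... digest feedback ...
    // Reset in place instead of destroying and reallocating the map:
    global_feedback_.clear();
    global_feedback_.reserve(kInitialCapacity);
  }

 private:
  static constexpr std::size_t kInitialCapacity = 256;  // assumed value
  std::unordered_map<AllocationSite*, std::size_t> global_feedback_{
      kInitialCapacity};
};
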
@@ -1516,7 +1500,6 @@ bool Heap::PerformGarbageCollection(
   int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
 
   {
-    Heap::PretenuringScope pretenuring_scope(this);
     Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
 
     switch (collector) {
...
@@ -6,6 +6,7 @@
 #define V8_HEAP_HEAP_H_
 
 #include <cmath>
+#include <unordered_map>
 #include <vector>
 
 // Clients of this interface shouldn't depend on lots of heap internals.

@@ -554,7 +555,7 @@ class Heap {
   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT, MINOR_MARK_COMPACT };
 
-  enum UpdateAllocationSiteMode { kGlobal, kCached };
+  using PretenuringFeedbackMap = std::unordered_map<AllocationSite*, size_t>;
 
   // Taking this mutex prevents the GC from entering a phase that relocates
   // object references.

@@ -1473,14 +1474,11 @@ class Heap {
   // Allocation site tracking. =================================================
   // ===========================================================================
 
-  // Updates the AllocationSite of a given {object}. If the global prenuring
-  // storage is passed as {pretenuring_feedback} the memento found count on
-  // the corresponding allocation site is immediately updated and an entry
-  // in the hash map is created. Otherwise the entry (including a the count
-  // value) is cached on the local pretenuring feedback.
-  template <UpdateAllocationSiteMode mode>
-  inline void UpdateAllocationSite(Map* map, HeapObject* object,
-                                   base::HashMap* pretenuring_feedback);
+  // Updates the AllocationSite of a given {object}. The entry (including the
+  // count) is cached on the local pretenuring feedback.
+  inline void UpdateAllocationSite(
+      Map* map, HeapObject* object,
+      PretenuringFeedbackMap* pretenuring_feedback);
 
   // Removes an entry from the global pretenuring storage.
   inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);

@@ -1489,7 +1487,7 @@ class Heap {
   // method needs to be called after evacuation, as allocation sites may be
   // evacuated and this method resolves forward pointers accordingly.
   void MergeAllocationSitePretenuringFeedback(
-      const base::HashMap& local_pretenuring_feedback);
+      const PretenuringFeedbackMap& local_pretenuring_feedback);
 
   // ===========================================================================
   // Retaining path tracking. ==================================================

@@ -2375,7 +2373,7 @@ class Heap {
   // storage is only alive temporary during a GC. The invariant is that all
   // pointers in this map are already fixed, i.e., they do not point to
   // forwarding pointers.
-  base::HashMap* global_pretenuring_feedback_;
+  PretenuringFeedbackMap global_pretenuring_feedback_;
 
   char trace_ring_buffer_[kTraceRingBufferSize];
...
@@ -1680,9 +1680,10 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
 
 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
  public:
-  explicit EvacuateNewSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
-                                   RecordMigratedSlotVisitor* record_visitor,
-                                   base::HashMap* local_pretenuring_feedback)
+  explicit EvacuateNewSpaceVisitor(
+      Heap* heap, LocalAllocator* local_allocator,
+      RecordMigratedSlotVisitor* record_visitor,
+      Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
       : EvacuateVisitorBase(heap, local_allocator, record_visitor),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         promoted_size_(0),

@@ -1696,8 +1697,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
       promoted_size_ += size;
       return true;
     }
-    heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
-                                               local_pretenuring_feedback_);
+    heap_->UpdateAllocationSite(object->map(), object,
+                                local_pretenuring_feedback_);
     HeapObject* target = nullptr;
     AllocationSpace space = AllocateTargetObject(object, size, &target);
     MigrateObject(HeapObject::cast(target), object, size, space);

@@ -1739,7 +1740,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
   LocalAllocationBuffer buffer_;
   intptr_t promoted_size_;
   intptr_t semispace_copied_size_;
-  base::HashMap* local_pretenuring_feedback_;
+  Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
 };
 
 template <PageEvacuationMode mode>

@@ -1747,7 +1748,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
  public:
   explicit EvacuateNewSpacePageVisitor(
       Heap* heap, RecordMigratedSlotVisitor* record_visitor,
-      base::HashMap* local_pretenuring_feedback)
+      Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
       : heap_(heap),
         record_visitor_(record_visitor),
         moved_bytes_(0),

@@ -1771,8 +1772,8 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
   inline bool Visit(HeapObject* object, int size) {
     if (mode == NEW_TO_NEW) {
-      heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
-                                                 local_pretenuring_feedback_);
+      heap_->UpdateAllocationSite(object->map(), object,
+                                  local_pretenuring_feedback_);
     } else if (mode == NEW_TO_OLD) {
       object->IterateBodyFast(record_visitor_);
     }

@@ -1786,7 +1787,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
   Heap* heap_;
   RecordMigratedSlotVisitor* record_visitor_;
   intptr_t moved_bytes_;
-  base::HashMap* local_pretenuring_feedback_;
+  Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
 };
 
 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {

@@ -3290,7 +3291,7 @@ class Evacuator : public Malloced {
   // Locally cached collector data.
   LocalAllocator local_allocator_;
   CompactionSpaceCollection compaction_spaces_;
-  base::HashMap local_pretenuring_feedback_;
+  Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
 
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
...
@@ -70,8 +70,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
   if (is_incremental_marking_) {
     heap()->incremental_marking()->TransferColor(source, target);
   }
-  heap()->UpdateAllocationSite<Heap::kCached>(map, source,
-                                              &local_pretenuring_feedback_);
+  heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
   return true;
 }
...

@@ -129,7 +129,7 @@ class Scavenger {
   Heap* const heap_;
   PromotionList::View promotion_list_;
   CopiedList::View copied_list_;
-  base::HashMap local_pretenuring_feedback_;
+  Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
   size_t copied_size_;
   size_t promoted_size_;
   LocalAllocator allocator_;
...