Commit f3cdf8a9 authored by mattloring, committed by Commit bot

Allocation sampling for paged/large object spaces

This change expands allocation sampling to include the old, map, code, and large object spaces. This involved refactoring much of the observation logic out of NewSpace into the Space base class, with subclasses overriding it as needed (sketched below).
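
The refactored interface, condensed here into one place (a sketch assembled from the diff below, not the literal headers; member lists and unrelated details are omitted, and the step-renewal lines after Step() are an assumption based on how GetNextStepSize() is used elsewhere in this CL):

class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}
  virtual ~AllocationObserver() {}

  // Counts bytes and fires Step() roughly every step_size bytes.
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      step_size_ = GetNextStepSize();    // assumption: the step is renewed
      bytes_to_next_step_ = step_size_;  // after each Step() notification
    }
  }

 protected:
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;
};

class Space {
 public:
  // Observer bookkeeping now lives in the base class, so every space type can
  // register, pause, and notify observers.
  virtual void AddAllocationObserver(AllocationObserver* observer);
  virtual void RemoveAllocationObserver(AllocationObserver* observer);
  virtual void PauseAllocationObservers();
  virtual void ResumeAllocationObservers();

  // Called from each space's allocation path (PagedSpace::AllocateRaw,
  // FreeList::Allocate, LargeObjectSpace::AllocateRaw in this CL); notifies
  // every registered observer unless observation is paused.
  void AllocationStep(Address soon_object, int size);
};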

Additionally, the sampling heap profiler now maintains a pair of allocation observers. One observes the new space, which resets its inline allocation limit so that the observer is notified periodically. The other observes allocations across the remaining spaces, where no additional work is required to observe allocations (see the sketch below).
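
Roughly, the wiring looks like this (condensed from sampling-heap-profiler.cc in this CL; the initializer list, members, and error handling are elided):

SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
                                           uint64_t rate, int stack_depth) {
  // The new space needs its inline allocation limit adjusted, so it gets a
  // dedicated observer.
  heap->new_space()->AddAllocationObserver(new_space_observer_.get());
  // All other spaces share the second observer; they notify it directly from
  // their allocation paths with no extra bookkeeping.
  AllSpaces spaces(heap);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    if (space != heap->new_space()) {
      space->AddAllocationObserver(other_spaces_observer_.get());
    }
  }
}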

Tests have been updated to ensure that allocations are observed correctly for Paged and LargeObject spaces.

R=ofrobots@google.com, hpayer@chromium.org, ulan@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1625753002

Cr-Commit-Position: refs/heads/master@{#33959}
parent 143a120f
@@ -53,10 +53,10 @@ struct Heap::StrongRootsList {
   StrongRootsList* next;
 };
 
-class IdleScavengeObserver : public InlineAllocationObserver {
+class IdleScavengeObserver : public AllocationObserver {
  public:
   IdleScavengeObserver(Heap& heap, intptr_t step_size)
-      : InlineAllocationObserver(step_size), heap_(heap) {}
+      : AllocationObserver(step_size), heap_(heap) {}
 
   void Step(int bytes_allocated, Address, size_t) override {
     heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
@@ -1414,7 +1414,7 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
 
 void Heap::MarkCompact() {
-  PauseInlineAllocationObserversScope pause_observers(new_space());
+  PauseAllocationObserversScope pause_observers(this);
 
   gc_state_ = MARK_COMPACT;
   LOG(isolate_, ResourceEvent("markcompact", "begin"));
@@ -1615,7 +1615,7 @@ void Heap::Scavenge() {
 
   // Bump-pointer allocations done during scavenge are not real allocations.
   // Pause the inline allocation steps.
-  PauseInlineAllocationObserversScope pause_observers(new_space());
+  PauseAllocationObserversScope pause_observers(this);
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
@@ -5166,7 +5166,7 @@ bool Heap::SetUp() {
 
   idle_scavenge_observer_ = new IdleScavengeObserver(
       *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
-  new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
+  new_space()->AddAllocationObserver(idle_scavenge_observer_);
 
   return true;
 }
@@ -5266,7 +5266,7 @@ void Heap::TearDown() {
     PrintAlloctionsHash();
   }
 
-  new_space()->RemoveInlineAllocationObserver(idle_scavenge_observer_);
+  new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
   delete idle_scavenge_observer_;
   idle_scavenge_observer_ = nullptr;
 
......
@@ -275,6 +275,7 @@ namespace internal {
   PRIVATE_SYMBOL_LIST(V)
 
 // Forward declarations.
+class AllocationObserver;
 class ArrayBufferTracker;
 class GCIdleTimeAction;
 class GCIdleTimeHandler;
@@ -283,7 +284,6 @@ class GCTracer;
 class HeapObjectsFilter;
 class HeapStats;
 class HistogramTimer;
-class InlineAllocationObserver;
 class Isolate;
 class MemoryReducer;
 class ObjectStats;
@@ -2119,7 +2119,7 @@ class Heap {
   ScavengeJob* scavenge_job_;
 
-  InlineAllocationObserver* idle_scavenge_observer_;
+  AllocationObserver* idle_scavenge_observer_;
 
   // These two counters are monotomically increasing and never reset.
   size_t full_codegen_bytes_generated_;
@@ -2572,20 +2572,19 @@ class PathTracer : public ObjectVisitor {
 #endif  // DEBUG
 
 // -----------------------------------------------------------------------------
-// Allows observation of inline allocation in the new space.
-class InlineAllocationObserver {
+// Allows observation of allocations.
+class AllocationObserver {
  public:
-  explicit InlineAllocationObserver(intptr_t step_size)
+  explicit AllocationObserver(intptr_t step_size)
       : step_size_(step_size), bytes_to_next_step_(step_size) {
     DCHECK(step_size >= kPointerSize);
   }
-  virtual ~InlineAllocationObserver() {}
+  virtual ~AllocationObserver() {}
 
-  // Called each time the new space does an inline allocation step. This may be
+  // Called each time the observed space does an allocation step. This may be
   // more frequently than the step_size we are monitoring (e.g. when there are
   // multiple observers, or when page or space boundary is encountered.)
-  void InlineAllocationStep(int bytes_allocated, Address soon_object,
-                            size_t size) {
+  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
     bytes_to_next_step_ -= bytes_allocated;
     if (bytes_to_next_step_ <= 0) {
       Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
@@ -2620,8 +2619,10 @@ class InlineAllocationObserver {
   intptr_t bytes_to_next_step_;
 
  private:
+  friend class LargeObjectSpace;
   friend class NewSpace;
-  DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
+  friend class PagedSpace;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
 };
 
 }  // namespace internal
......
@@ -23,7 +23,6 @@ IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
       IncrementalMarking::DO_NOT_FORCE_COMPLETION);
 }
 
-
 IncrementalMarking::IncrementalMarking(Heap* heap)
     : heap_(heap),
       observer_(*this, kAllocatedThreshold),
@@ -46,7 +45,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       incremental_marking_finalization_rounds_(0),
       request_type_(COMPLETE_MARKING) {}
 
-
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
   MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
@@ -549,7 +547,7 @@ void IncrementalMarking::Start(const char* reason) {
     state_ = SWEEPING;
   }
 
-  heap_->new_space()->AddInlineAllocationObserver(&observer_);
+  heap_->new_space()->AddAllocationObserver(&observer_);
 
   incremental_marking_job()->Start(heap_);
 }
@@ -953,7 +951,7 @@ void IncrementalMarking::Stop() {
     PrintF("[IncrementalMarking] Stopping.\n");
   }
 
-  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
+  heap_->new_space()->RemoveAllocationObserver(&observer_);
   IncrementalMarking::set_should_hurry(false);
   ResetStepCounters();
   if (IsMarking()) {
......
@@ -219,10 +219,10 @@ class IncrementalMarking {
   }
 
  private:
-  class Observer : public InlineAllocationObserver {
+  class Observer : public AllocationObserver {
    public:
     Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
-        : InlineAllocationObserver(step_size),
+        : AllocationObserver(step_size),
           incremental_marking_(incremental_marking) {}
 
     void Step(int bytes_allocated, Address, size_t) override {
......
@@ -492,12 +492,18 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
 
 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                          AllocationAlignment alignment) {
 #ifdef V8_HOST_ARCH_32_BIT
-  return alignment == kDoubleAligned
-             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
-             : AllocateRawUnaligned(size_in_bytes);
+  AllocationResult result =
+      alignment == kDoubleAligned
+          ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+          : AllocateRawUnaligned(size_in_bytes);
 #else
-  return AllocateRawUnaligned(size_in_bytes);
+  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
 #endif
+
+  HeapObject* heap_obj = nullptr;
+  if (!result.IsRetry() && result.To(&heap_obj)) {
+    AllocationStep(heap_obj->address(), size_in_bytes);
+  }
+  return result;
 }
......
@@ -71,6 +71,20 @@ bool HeapObjectIterator::AdvanceToNextPage() {
   return true;
 }
 
+PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
+    : heap_(heap) {
+  AllSpaces spaces(heap_);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    space->PauseAllocationObservers();
+  }
+}
+
+PauseAllocationObserversScope::~PauseAllocationObserversScope() {
+  AllSpaces spaces(heap_);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    space->ResumeAllocationObservers();
+  }
+}
 
 // -----------------------------------------------------------------------------
 // CodeRange
@@ -961,6 +975,14 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
               ObjectSpace::kObjectSpaceMapSpace);
 
+void Space::AllocationStep(Address soon_object, int size) {
+  if (!allocation_observers_paused_) {
+    for (int i = 0; i < allocation_observers_->length(); ++i) {
+      AllocationObserver* o = (*allocation_observers_)[i];
+      o->AllocationStep(size, soon_object, size);
+    }
+  }
+}
 
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
@@ -1466,8 +1488,7 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
     Address high = to_space_.page_high();
     Address new_top = allocation_info_.top() + size_in_bytes;
     allocation_info_.set_limit(Min(new_top, high));
-  } else if (inline_allocation_observers_paused_ ||
-             top_on_previous_step_ == 0) {
+  } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
     // Normal limit is the end of the current page.
     allocation_info_.set_limit(to_space_.page_high());
   } else {
@@ -1548,9 +1569,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
 
 
 void NewSpace::StartNextInlineAllocationStep() {
-  if (!inline_allocation_observers_paused_) {
+  if (!allocation_observers_paused_) {
     top_on_previous_step_ =
-        inline_allocation_observers_.length() ? allocation_info_.top() : 0;
+        allocation_observers_->length() ? allocation_info_.top() : 0;
     UpdateInlineAllocationLimit(0);
   }
 }
@@ -1558,44 +1579,36 @@ void NewSpace::StartNextInlineAllocationStep() {
 
 intptr_t NewSpace::GetNextInlineAllocationStepSize() {
   intptr_t next_step = 0;
-  for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
-    InlineAllocationObserver* o = inline_allocation_observers_[i];
+  for (int i = 0; i < allocation_observers_->length(); ++i) {
+    AllocationObserver* o = (*allocation_observers_)[i];
     next_step = next_step ? Min(next_step, o->bytes_to_next_step())
                           : o->bytes_to_next_step();
   }
-  DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
+  DCHECK(allocation_observers_->length() == 0 || next_step != 0);
   return next_step;
 }
 
-
-void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
-  inline_allocation_observers_.Add(observer);
+void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
+  Space::AddAllocationObserver(observer);
   StartNextInlineAllocationStep();
 }
 
-
-void NewSpace::RemoveInlineAllocationObserver(
-    InlineAllocationObserver* observer) {
-  bool removed = inline_allocation_observers_.RemoveElement(observer);
-  // Only used in assertion. Suppress unused variable warning.
-  static_cast<void>(removed);
-  DCHECK(removed);
+void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
+  Space::RemoveAllocationObserver(observer);
   StartNextInlineAllocationStep();
 }
 
-
-void NewSpace::PauseInlineAllocationObservers() {
+void NewSpace::PauseAllocationObservers() {
   // Do a step to account for memory allocated so far.
   InlineAllocationStep(top(), top(), nullptr, 0);
-  inline_allocation_observers_paused_ = true;
+  Space::PauseAllocationObservers();
   top_on_previous_step_ = 0;
   UpdateInlineAllocationLimit(0);
 }
 
-
-void NewSpace::ResumeInlineAllocationObservers() {
+void NewSpace::ResumeAllocationObservers() {
   DCHECK(top_on_previous_step_ == 0);
-  inline_allocation_observers_paused_ = false;
+  Space::ResumeAllocationObservers();
   StartNextInlineAllocationStep();
 }
@@ -1604,9 +1617,9 @@ void NewSpace::InlineAllocationStep(Address top, Address new_top,
                                     Address soon_object, size_t size) {
   if (top_on_previous_step_) {
     int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
-    for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
-      inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
-                                                            soon_object, size);
+    for (int i = 0; i < allocation_observers_->length(); ++i) {
+      (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
+                                                  size);
     }
     top_on_previous_step_ = new_top;
   }
@@ -2500,6 +2513,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == nullptr) return nullptr;
+  owner_->AllocationStep(new_node->address(), size_in_bytes);
 
   int bytes_left = new_node_size - size_in_bytes;
   DCHECK(bytes_left >= 0);
@@ -3011,6 +3025,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   }
 
   heap()->incremental_marking()->OldSpaceStep(object_size);
+  AllocationStep(object->address(), object_size);
   return object;
 }
......
@@ -20,10 +20,10 @@ namespace v8 {
 namespace internal {
 
 class AllocationInfo;
+class AllocationObserver;
 class CompactionSpace;
 class CompactionSpaceCollection;
 class FreeList;
-class InlineAllocationObserver;
 class Isolate;
 class MemoryAllocator;
 class MemoryChunk;
@@ -902,7 +902,9 @@ class LargePage : public MemoryChunk {
 class Space : public Malloced {
  public:
   Space(Heap* heap, AllocationSpace id, Executability executable)
-      : heap_(heap),
+      : allocation_observers_(new List<AllocationObserver*>()),
+        allocation_observers_paused_(false),
+        heap_(heap),
         id_(id),
         executable_(executable),
         committed_(0),
@@ -918,6 +920,26 @@ class Space : public Malloced {
   // Identity used in error reporting.
   AllocationSpace identity() { return id_; }
 
+  virtual void AddAllocationObserver(AllocationObserver* observer) {
+    allocation_observers_->Add(observer);
+  }
+
+  virtual void RemoveAllocationObserver(AllocationObserver* observer) {
+    bool removed = allocation_observers_->RemoveElement(observer);
+    USE(removed);
+    DCHECK(removed);
+  }
+
+  virtual void PauseAllocationObservers() {
+    allocation_observers_paused_ = true;
+  }
+
+  virtual void ResumeAllocationObservers() {
+    allocation_observers_paused_ = false;
+  }
+
+  void AllocationStep(Address soon_object, int size);
+
   // Return the total amount committed memory for this space, i.e., allocatable
   // memory and page headers.
   virtual intptr_t CommittedMemory() { return committed_; }
@@ -964,6 +986,9 @@ class Space : public Malloced {
     DCHECK_GE(committed_, 0);
   }
 
+  v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
+  bool allocation_observers_paused_;
+
  private:
   Heap* heap_;
   AllocationSpace id_;
@@ -2485,8 +2510,7 @@ class NewSpace : public Space {
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        top_on_previous_step_(0),
-        inline_allocation_observers_paused_(false) {}
+        top_on_previous_step_(0) {}
 
   inline bool Contains(HeapObject* o);
   inline bool ContainsSlow(Address a);
@@ -2636,21 +2660,25 @@ class NewSpace : public Space {
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
 
+  // When inline allocation stepping is active, either because of incremental
+  // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
+  // inline allocation every once in a while. This is done by setting
+  // allocation_info_.limit to be lower than the actual limit and and increasing
+  // it in steps to guarantee that the observers are notified periodically.
   void UpdateInlineAllocationLimit(int size_in_bytes);
 
-  void DisableInlineAllocationSteps() {
-    top_on_previous_step_ = 0;
-    UpdateInlineAllocationLimit(0);
-  }
-
   // Allows observation of inline allocation. The observer->Step() method gets
   // called after every step_size bytes have been allocated (approximately).
   // This works by adjusting the allocation limit to a lower value and adjusting
   // it after each step.
-  void AddInlineAllocationObserver(InlineAllocationObserver* observer);
+  void AddAllocationObserver(AllocationObserver* observer) override;
 
-  // Removes a previously installed observer.
-  void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
+  void RemoveAllocationObserver(AllocationObserver* observer) override;
+
+  void DisableInlineAllocationSteps() {
+    top_on_previous_step_ = 0;
+    UpdateInlineAllocationLimit(0);
+  }
 
   // Get the extent of the inactive semispace (for use as a marking stack,
   // or to zap it). Notice: space-addresses are not necessarily on the
@@ -2714,6 +2742,9 @@ class NewSpace : public Space {
 
   SemiSpace* active_space() { return &to_space_; }
 
+  void PauseAllocationObservers() override;
+  void ResumeAllocationObservers() override;
+
  private:
   // Update allocation info to match the current to-space page.
   void UpdateAllocationInfo();
@@ -2736,14 +2767,7 @@ class NewSpace : public Space {
   // mark-compact collection.
   AllocationInfo allocation_info_;
 
-  // When inline allocation stepping is active, either because of incremental
-  // marking or because of idle scavenge, we 'interrupt' inline allocation every
-  // once in a while. This is done by setting allocation_info_.limit to be lower
-  // than the actual limit and and increasing it in steps to guarantee that the
-  // observers are notified periodically.
-  List<InlineAllocationObserver*> inline_allocation_observers_;
   Address top_on_previous_step_;
-  bool inline_allocation_observers_paused_;
 
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
@@ -2760,26 +2784,18 @@ class NewSpace : public Space {
                             size_t size);
   intptr_t GetNextInlineAllocationStepSize();
   void StartNextInlineAllocationStep();
-  void PauseInlineAllocationObservers();
-  void ResumeInlineAllocationObservers();
 
-  friend class PauseInlineAllocationObserversScope;
   friend class SemiSpaceIterator;
 };
 
-class PauseInlineAllocationObserversScope {
+class PauseAllocationObserversScope {
  public:
-  explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
-      : new_space_(new_space) {
-    new_space_->PauseInlineAllocationObservers();
-  }
-  ~PauseInlineAllocationObserversScope() {
-    new_space_->ResumeInlineAllocationObservers();
-  }
+  explicit PauseAllocationObserversScope(Heap* heap);
+  ~PauseAllocationObserversScope();
 
  private:
-  NewSpace* new_space_;
-  DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
+  Heap* heap_;
+  DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
 };
 
 // -----------------------------------------------------------------------------
......
@@ -16,23 +16,52 @@
 
 namespace v8 {
 namespace internal {
 
+// We sample with a Poisson process, with constant average sampling interval.
+// This follows the exponential probability distribution with parameter
+// λ = 1/rate where rate is the average number of bytes between samples.
+//
+// Let u be a uniformly distributed random number between 0 and 1, then
+// next_sample = (- ln u) / λ
+intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
+  if (FLAG_sampling_heap_profiler_suppress_randomness) {
+    return rate;
+  }
+  double u = random_->NextDouble();
+  double next = (-std::log(u)) * rate;
+  return next < kPointerSize
+             ? kPointerSize
+             : (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
+}
+
 SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
                                            uint64_t rate, int stack_depth)
-    : InlineAllocationObserver(GetNextSampleInterval(
-          heap->isolate()->random_number_generator(), rate)),
-      isolate_(heap->isolate()),
+    : isolate_(heap->isolate()),
       heap_(heap),
-      random_(isolate_->random_number_generator()),
+      new_space_observer_(new SamplingAllocationObserver(
+          heap_, rate, rate, this, heap->isolate()->random_number_generator())),
+      other_spaces_observer_(new SamplingAllocationObserver(
+          heap_, rate, rate, this, heap->isolate()->random_number_generator())),
       names_(names),
       samples_(),
-      rate_(rate),
       stack_depth_(stack_depth) {
-  heap->new_space()->AddInlineAllocationObserver(this);
+  heap->new_space()->AddAllocationObserver(new_space_observer_.get());
+  AllSpaces spaces(heap);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    if (space != heap->new_space()) {
+      space->AddAllocationObserver(other_spaces_observer_.get());
+    }
+  }
 }
 
 SamplingHeapProfiler::~SamplingHeapProfiler() {
-  heap_->new_space()->RemoveInlineAllocationObserver(this);
+  heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
+  AllSpaces spaces(heap_);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    if (space != heap_->new_space()) {
+      space->RemoveAllocationObserver(other_spaces_observer_.get());
+    }
+  }
+
   // Clear samples and drop all the weak references we are keeping.
   std::set<SampledAllocation*>::iterator it;
@@ -43,13 +72,6 @@ SamplingHeapProfiler::~SamplingHeapProfiler() {
   samples_.swap(empty);
 }
 
-void SamplingHeapProfiler::Step(int bytes_allocated, Address soon_object,
-                                size_t size) {
-  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
-  DCHECK(soon_object);
-  SampleObject(soon_object, size);
-}
-
 void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
   DisallowHeapAllocation no_allocation;
@@ -70,25 +92,6 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
 }
 
-// We sample with a Poisson process, with constant average sampling interval.
-// This follows the exponential probability distribution with parameter
-// λ = 1/rate where rate is the average number of bytes between samples.
-//
-// Let u be a uniformly distributed random number between 0 and 1, then
-// next_sample = (- ln u) / λ
-intptr_t SamplingHeapProfiler::GetNextSampleInterval(
-    base::RandomNumberGenerator* random, uint64_t rate) {
-  if (FLAG_sampling_heap_profiler_suppress_randomness) {
-    return rate;
-  }
-  double u = random->NextDouble();
-  double next = (-std::log(u)) * rate;
-  return next < kPointerSize
-             ? kPointerSize
-             : (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
-}
-
 void SamplingHeapProfiler::SampledAllocation::OnWeakCallback(
     const WeakCallbackInfo<SampledAllocation>& data) {
   SampledAllocation* sample = data.GetParameter();
@@ -159,8 +162,7 @@ SamplingHeapProfiler::SampledAllocation::SampledAllocation(
   }
 }
 
-
-SamplingHeapProfiler::Node* SamplingHeapProfiler::AllocateNode(
+v8::AllocationProfile::Node* SamplingHeapProfiler::AllocateNode(
     AllocationProfile* profile, const std::map<int, Script*>& scripts,
     FunctionInfo* function_info) {
   DCHECK(function_info->get_name());
@@ -180,37 +182,36 @@ SamplingHeapProfiler::Node* SamplingHeapProfiler::AllocateNode(
                                  function_info->get_start_position());
   }
 
-  profile->nodes().push_back(
-      Node({ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(
+  profile->nodes().push_back(v8::AllocationProfile::Node(
+      {ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(
            function_info->get_name())),
        ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(
            function_info->get_script_name())),
       function_info->get_script_id(), function_info->get_start_position(),
-       line, column, std::vector<Node*>(),
+       line, column, std::vector<v8::AllocationProfile::Node*>(),
        std::vector<v8::AllocationProfile::Allocation>()}));
   return &profile->nodes().back();
 }
 
-
-SamplingHeapProfiler::Node* SamplingHeapProfiler::FindOrAddChildNode(
+v8::AllocationProfile::Node* SamplingHeapProfiler::FindOrAddChildNode(
     AllocationProfile* profile, const std::map<int, Script*>& scripts,
-    Node* parent, FunctionInfo* function_info) {
-  for (Node* child : parent->children) {
+    v8::AllocationProfile::Node* parent, FunctionInfo* function_info) {
+  for (v8::AllocationProfile::Node* child : parent->children) {
     if (child->script_id == function_info->get_script_id() &&
         child->start_position == function_info->get_start_position())
       return child;
   }
 
-  Node* child = AllocateNode(profile, scripts, function_info);
+  v8::AllocationProfile::Node* child =
+      AllocateNode(profile, scripts, function_info);
   parent->children.push_back(child);
   return child;
 }
 
-
-SamplingHeapProfiler::Node* SamplingHeapProfiler::AddStack(
+v8::AllocationProfile::Node* SamplingHeapProfiler::AddStack(
     AllocationProfile* profile, const std::map<int, Script*>& scripts,
     const std::vector<FunctionInfo*>& stack) {
-  Node* node = profile->GetRootNode();
+  v8::AllocationProfile::Node* node = profile->GetRootNode();
   // We need to process the stack in reverse order as the top of the stack is
   // the first element in the list.
@@ -241,7 +242,8 @@ v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
   AllocateNode(profile, scripts, &function_info);
 
   for (SampledAllocation* allocation : samples_) {
-    Node* node = AddStack(profile, scripts, allocation->get_stack());
+    v8::AllocationProfile::Node* node =
+        AddStack(profile, scripts, allocation->get_stack());
     node->allocations.push_back({allocation->get_size(), 1});
   }
......
@@ -20,24 +20,25 @@ class RandomNumberGenerator;
 
 namespace internal {
 
+class SamplingAllocationObserver;
+
 class AllocationProfile : public v8::AllocationProfile {
  public:
   AllocationProfile() : nodes_() {}
 
-  Node* GetRootNode() override {
+  v8::AllocationProfile::Node* GetRootNode() override {
     return nodes_.size() == 0 ? nullptr : &nodes_.front();
   }
 
-  std::deque<Node>& nodes() { return nodes_; }
+  std::deque<v8::AllocationProfile::Node>& nodes() { return nodes_; }
 
  private:
-  std::deque<Node> nodes_;
+  std::deque<v8::AllocationProfile::Node> nodes_;
 
   DISALLOW_COPY_AND_ASSIGN(AllocationProfile);
 };
 
-class SamplingHeapProfiler : public InlineAllocationObserver {
+class SamplingHeapProfiler {
  public:
   SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
                        int stack_depth);
@@ -45,11 +46,6 @@ class SamplingHeapProfiler : public InlineAllocationObserver {
 
   v8::AllocationProfile* GetAllocationProfile();
 
-  void Step(int bytes_allocated, Address soon_object, size_t size) override;
-  intptr_t GetNextStepSize() override {
-    return GetNextSampleInterval(random_, rate_);
-  }
-
   StringsStorage* names() const { return names_; }
 
   class FunctionInfo {
@@ -99,35 +95,61 @@ class SamplingHeapProfiler : public InlineAllocationObserver {
   };
 
  private:
-  using Node = v8::AllocationProfile::Node;
-
   Heap* heap() const { return heap_; }
 
   void SampleObject(Address soon_object, size_t size);
 
-  static intptr_t GetNextSampleInterval(base::RandomNumberGenerator* random,
-                                        uint64_t rate);
-
   // Methods that construct v8::AllocationProfile.
-  Node* AddStack(AllocationProfile* profile,
-                 const std::map<int, Script*>& scripts,
-                 const std::vector<FunctionInfo*>& stack);
+  v8::AllocationProfile::Node* AddStack(
+      AllocationProfile* profile, const std::map<int, Script*>& scripts,
+      const std::vector<FunctionInfo*>& stack);
 
-  Node* FindOrAddChildNode(AllocationProfile* profile,
-                           const std::map<int, Script*>& scripts, Node* parent,
-                           FunctionInfo* function_info);
+  v8::AllocationProfile::Node* FindOrAddChildNode(
+      AllocationProfile* profile, const std::map<int, Script*>& scripts,
+      v8::AllocationProfile::Node* parent, FunctionInfo* function_info);
 
-  Node* AllocateNode(AllocationProfile* profile,
-                     const std::map<int, Script*>& scripts,
-                     FunctionInfo* function_info);
+  v8::AllocationProfile::Node* AllocateNode(
+      AllocationProfile* profile, const std::map<int, Script*>& scripts,
+      FunctionInfo* function_info);
 
   Isolate* const isolate_;
   Heap* const heap_;
-  base::RandomNumberGenerator* const random_;
+  base::SmartPointer<SamplingAllocationObserver> new_space_observer_;
+  base::SmartPointer<SamplingAllocationObserver> other_spaces_observer_;
   StringsStorage* const names_;
   std::set<SampledAllocation*> samples_;
-  const uint64_t rate_;
   const int stack_depth_;
+
+  friend class SamplingAllocationObserver;
 };
 
+class SamplingAllocationObserver : public AllocationObserver {
+ public:
+  SamplingAllocationObserver(Heap* heap, intptr_t step_size, uint64_t rate,
+                             SamplingHeapProfiler* profiler,
+                             base::RandomNumberGenerator* random)
+      : AllocationObserver(step_size),
+        profiler_(profiler),
+        heap_(heap),
+        random_(random),
+        rate_(rate) {}
+  virtual ~SamplingAllocationObserver() {}
+
+ protected:
+  void Step(int bytes_allocated, Address soon_object, size_t size) override {
+    USE(heap_);
+    DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
+    DCHECK(soon_object);
+    profiler_->SampleObject(soon_object, size);
+  }
+
+  intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
+
+ private:
+  intptr_t GetNextSampleInterval(uint64_t rate);
+  SamplingHeapProfiler* const profiler_;
+  Heap* const heap_;
+  base::RandomNumberGenerator* const random_;
+  uint64_t const rate_;
+};
+
 }  // namespace internal
 }  // namespace v8
......
@@ -523,10 +523,27 @@ static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
   return filler;
 }
 
-class Observer : public InlineAllocationObserver {
+static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
+  AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
+  CHECK(!allocation.IsRetry());
+  HeapObject* filler = NULL;
+  CHECK(allocation.To(&filler));
+  space->heap()->CreateFillerObjectAt(filler->address(), size);
+  return filler;
+}
+
+static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
+  AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
+  CHECK(!allocation.IsRetry());
+  HeapObject* filler = NULL;
+  CHECK(allocation.To(&filler));
+  return filler;
+}
+
+class Observer : public AllocationObserver {
  public:
   explicit Observer(intptr_t step_size)
-      : InlineAllocationObserver(step_size), count_(0) {}
+      : AllocationObserver(step_size), count_(0) {}
 
   void Step(int bytes_allocated, Address, size_t) override { count_++; }
@@ -536,85 +553,93 @@ class Observer : public InlineAllocationObserver {
   int count_;
 };
 
-UNINITIALIZED_TEST(InlineAllocationObserver) {
-  v8::Isolate::CreateParams create_params;
-  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
-  v8::Isolate* isolate = v8::Isolate::New(create_params);
-  {
-    v8::Isolate::Scope isolate_scope(isolate);
-    v8::HandleScope handle_scope(isolate);
-    v8::Context::New(isolate)->Enter();
-
-    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
-
-    NewSpace* new_space = i_isolate->heap()->new_space();
-
-    Observer observer1(128);
-    new_space->AddInlineAllocationObserver(&observer1);
-
-    // The observer should not get notified if we have only allocated less than
-    // 128 bytes.
-    AllocateUnaligned(new_space, 64);
-    CHECK_EQ(observer1.count(), 0);
-
-    // The observer should get called when we have allocated exactly 128 bytes.
-    AllocateUnaligned(new_space, 64);
-    CHECK_EQ(observer1.count(), 1);
-
-    // Another >128 bytes should get another notification.
-    AllocateUnaligned(new_space, 136);
-    CHECK_EQ(observer1.count(), 2);
-
-    // Allocating a large object should get only one notification.
-    AllocateUnaligned(new_space, 1024);
-    CHECK_EQ(observer1.count(), 3);
-
-    // Allocating another 2048 bytes in small objects should get 16
-    // notifications.
-    for (int i = 0; i < 64; ++i) {
-      AllocateUnaligned(new_space, 32);
-    }
-    CHECK_EQ(observer1.count(), 19);
-
-    // Multiple observers should work.
-    Observer observer2(96);
-    new_space->AddInlineAllocationObserver(&observer2);
-
-    AllocateUnaligned(new_space, 2048);
-    CHECK_EQ(observer1.count(), 20);
-    CHECK_EQ(observer2.count(), 1);
-
-    AllocateUnaligned(new_space, 104);
-    CHECK_EQ(observer1.count(), 20);
-    CHECK_EQ(observer2.count(), 2);
-
-    // Callback should stop getting called after an observer is removed.
-    new_space->RemoveInlineAllocationObserver(&observer1);
-
-    AllocateUnaligned(new_space, 384);
-    CHECK_EQ(observer1.count(), 20);  // no more notifications.
-    CHECK_EQ(observer2.count(), 3);   // this one is still active.
-
-    // Ensure that PauseInlineAllocationObserversScope work correctly.
-    AllocateUnaligned(new_space, 48);
-    CHECK_EQ(observer2.count(), 3);
-    {
-      PauseInlineAllocationObserversScope pause_observers(new_space);
-      CHECK_EQ(observer2.count(), 3);
-      AllocateUnaligned(new_space, 384);
-      CHECK_EQ(observer2.count(), 3);
-    }
-    CHECK_EQ(observer2.count(), 3);
-
-    // Coupled with the 48 bytes allocated before the pause, another 48 bytes
-    // allocated here should trigger a notification.
-    AllocateUnaligned(new_space, 48);
-    CHECK_EQ(observer2.count(), 4);
-
-    new_space->RemoveInlineAllocationObserver(&observer2);
-    AllocateUnaligned(new_space, 384);
-    CHECK_EQ(observer1.count(), 20);
-    CHECK_EQ(observer2.count(), 4);
+template <typename T>
+void testAllocationObserver(Isolate* i_isolate, T* space) {
+  Observer observer1(128);
+  space->AddAllocationObserver(&observer1);
+
+  // The observer should not get notified if we have only allocated less than
+  // 128 bytes.
+  AllocateUnaligned(space, 64);
+  CHECK_EQ(observer1.count(), 0);
+
+  // The observer should get called when we have allocated exactly 128 bytes.
+  AllocateUnaligned(space, 64);
+  CHECK_EQ(observer1.count(), 1);
+
+  // Another >128 bytes should get another notification.
+  AllocateUnaligned(space, 136);
+  CHECK_EQ(observer1.count(), 2);
+
+  // Allocating a large object should get only one notification.
+  AllocateUnaligned(space, 1024);
+  CHECK_EQ(observer1.count(), 3);
+
+  // Allocating another 2048 bytes in small objects should get 16
+  // notifications.
+  for (int i = 0; i < 64; ++i) {
+    AllocateUnaligned(space, 32);
+  }
+  CHECK_EQ(observer1.count(), 19);
+
+  // Multiple observers should work.
+  Observer observer2(96);
+  space->AddAllocationObserver(&observer2);
+
+  AllocateUnaligned(space, 2048);
+  CHECK_EQ(observer1.count(), 20);
+  CHECK_EQ(observer2.count(), 1);
+
+  AllocateUnaligned(space, 104);
+  CHECK_EQ(observer1.count(), 20);
+  CHECK_EQ(observer2.count(), 2);
+
+  // Callback should stop getting called after an observer is removed.
+  space->RemoveAllocationObserver(&observer1);
+
+  AllocateUnaligned(space, 384);
+  CHECK_EQ(observer1.count(), 20);  // no more notifications.
+  CHECK_EQ(observer2.count(), 3);   // this one is still active.
+
+  // Ensure that PauseInlineAllocationObserversScope work correctly.
+  AllocateUnaligned(space, 48);
+  CHECK_EQ(observer2.count(), 3);
+  {
+    PauseAllocationObserversScope pause_observers(i_isolate->heap());
+    CHECK_EQ(observer2.count(), 3);
+    AllocateUnaligned(space, 384);
+    CHECK_EQ(observer2.count(), 3);
+  }
+  CHECK_EQ(observer2.count(), 3);
+
+  // Coupled with the 48 bytes allocated before the pause, another 48 bytes
+  // allocated here should trigger a notification.
+  AllocateUnaligned(space, 48);
+  CHECK_EQ(observer2.count(), 4);
+
+  space->RemoveAllocationObserver(&observer2);
+  AllocateUnaligned(space, 384);
+  CHECK_EQ(observer1.count(), 20);
+  CHECK_EQ(observer2.count(), 4);
+}
+
+UNINITIALIZED_TEST(AllocationObserver) {
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope(isolate);
+    v8::Context::New(isolate)->Enter();
+    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+    testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
+    // Old space is used but the code path is shared for all
+    // classes inheriting from PagedSpace.
+    testAllocationObserver<PagedSpace>(i_isolate,
                                       i_isolate->heap()->old_space());
+    testAllocationObserver<LargeObjectSpace>(i_isolate,
                                             i_isolate->heap()->lo_space());
   }
   isolate->Dispose();
 }
@@ -634,16 +659,16 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
     NewSpace* new_space = i_isolate->heap()->new_space();
 
     Observer observer1(512);
-    new_space->AddInlineAllocationObserver(&observer1);
+    new_space->AddAllocationObserver(&observer1);
     Observer observer2(576);
-    new_space->AddInlineAllocationObserver(&observer2);
+    new_space->AddAllocationObserver(&observer2);
 
     for (int i = 0; i < 512; ++i) {
       AllocateUnaligned(new_space, 32);
     }
 
-    new_space->RemoveInlineAllocationObserver(&observer1);
-    new_space->RemoveInlineAllocationObserver(&observer2);
+    new_space->RemoveAllocationObserver(&observer1);
+    new_space->RemoveAllocationObserver(&observer2);
 
     CHECK_EQ(observer1.count(), 32);
     CHECK_EQ(observer2.count(), 28);
......