Commit f3cdf8a9 authored by mattloring, committed by Commit bot

Allocation sampling for paged/large object spaces

This change expands allocation sampling to include the old, map, code, and large object spaces. This involved refactoring much of the observation logic out of NewSpace into the Space base class, with subclasses overriding it as needed.

Additionally, the sampling heap profiler now maintains a pair of allocation observers. One observes new space, where the inline allocation limit must be reset periodically so that allocations are reported; the other observes the remaining spaces, where no extra work is needed to observe allocations.

Tests have been updated to ensure that allocations are observed correctly for Paged and LargeObject spaces.
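For orientation, below is a self-contained sketch of the stepping mechanism that this patch generalizes from NewSpace to all spaces. Only the names AllocationObserver, AllocationStep, Step, GetNextStepSize, and the Add/Pause/ResumeAllocationObservers hooks come from the patch; the stand-in Space class, NotifyAllocation, and CountingObserver are simplifications for illustration, not V8 code.

// Illustrative sketch only; compiles standalone, does not reflect real V8 internals.
#include <cstdint>
#include <cstdio>
#include <vector>

class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}
  virtual ~AllocationObserver() {}

  // Called by the owning space for every allocation; fires Step() once at
  // least step_size_ bytes have accumulated since the last notification.
  void AllocationStep(int bytes_allocated) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_));
      step_size_ = GetNextStepSize();
      bytes_to_next_step_ = step_size_;
    }
  }

 protected:
  virtual void Step(int bytes_allocated) = 0;
  // Subclasses (e.g. a sampling profiler) can randomize the next interval.
  virtual intptr_t GetNextStepSize() { return step_size_; }

 private:
  intptr_t step_size_;
  intptr_t bytes_to_next_step_;
};

// Stand-in for Space: owns a list of observers and notifies them on each
// allocation, in the spirit of Space::AllocationStep in the patch.
class Space {
 public:
  void AddAllocationObserver(AllocationObserver* o) { observers_.push_back(o); }
  void PauseAllocationObservers() { paused_ = true; }
  void ResumeAllocationObservers() { paused_ = false; }
  void NotifyAllocation(int size) {
    if (paused_) return;
    for (AllocationObserver* o : observers_) o->AllocationStep(size);
  }

 private:
  std::vector<AllocationObserver*> observers_;
  bool paused_ = false;
};

class CountingObserver : public AllocationObserver {
 public:
  explicit CountingObserver(intptr_t step) : AllocationObserver(step) {}
  int count = 0;

 protected:
  void Step(int) override { ++count; }
};

int main() {
  Space old_space;
  CountingObserver observer(128);
  old_space.AddAllocationObserver(&observer);
  for (int i = 0; i < 10; ++i) old_space.NotifyAllocation(64);  // 640 bytes
  std::printf("steps observed: %d\n", observer.count);          // prints 5
  return 0;
}

In the patch itself, PagedSpace and LargeObjectSpace invoke Space::AllocationStep directly after each successful allocation, while NewSpace keeps its limit-based inline stepping and forwards to the shared observer list, as the diff below shows.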

R=ofrobots@google.com, hpayer@chromium.org, ulan@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1625753002

Cr-Commit-Position: refs/heads/master@{#33959}
parent 143a120f
......@@ -53,10 +53,10 @@ struct Heap::StrongRootsList {
StrongRootsList* next;
};
class IdleScavengeObserver : public InlineAllocationObserver {
class IdleScavengeObserver : public AllocationObserver {
public:
IdleScavengeObserver(Heap& heap, intptr_t step_size)
: InlineAllocationObserver(step_size), heap_(heap) {}
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
......@@ -1414,7 +1414,7 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
void Heap::MarkCompact() {
PauseInlineAllocationObserversScope pause_observers(new_space());
PauseAllocationObserversScope pause_observers(this);
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
......@@ -1615,7 +1615,7 @@ void Heap::Scavenge() {
// Bump-pointer allocations done during scavenge are not real allocations.
// Pause the inline allocation steps.
PauseInlineAllocationObserversScope pause_observers(new_space());
PauseAllocationObserversScope pause_observers(this);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
......@@ -5166,7 +5166,7 @@ bool Heap::SetUp() {
idle_scavenge_observer_ = new IdleScavengeObserver(
*this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
new_space()->AddAllocationObserver(idle_scavenge_observer_);
return true;
}
......@@ -5266,7 +5266,7 @@ void Heap::TearDown() {
PrintAlloctionsHash();
}
new_space()->RemoveInlineAllocationObserver(idle_scavenge_observer_);
new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
delete idle_scavenge_observer_;
idle_scavenge_observer_ = nullptr;
......
......@@ -275,6 +275,7 @@ namespace internal {
PRIVATE_SYMBOL_LIST(V)
// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
class GCIdleTimeAction;
class GCIdleTimeHandler;
......@@ -283,7 +284,6 @@ class GCTracer;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class InlineAllocationObserver;
class Isolate;
class MemoryReducer;
class ObjectStats;
......@@ -2119,7 +2119,7 @@ class Heap {
ScavengeJob* scavenge_job_;
InlineAllocationObserver* idle_scavenge_observer_;
AllocationObserver* idle_scavenge_observer_;
// These two counters are monotonically increasing and never reset.
size_t full_codegen_bytes_generated_;
......@@ -2572,20 +2572,19 @@ class PathTracer : public ObjectVisitor {
#endif // DEBUG
// -----------------------------------------------------------------------------
// Allows observation of inline allocation in the new space.
class InlineAllocationObserver {
// Allows observation of allocations.
class AllocationObserver {
public:
explicit InlineAllocationObserver(intptr_t step_size)
explicit AllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
DCHECK(step_size >= kPointerSize);
}
virtual ~InlineAllocationObserver() {}
virtual ~AllocationObserver() {}
// Called each time the new space does an inline allocation step. This may be
// Called each time the observed space does an allocation step. This may be
// more frequently than the step_size we are monitoring (e.g. when there are
// multiple observers, or when a page or space boundary is encountered).
void InlineAllocationStep(int bytes_allocated, Address soon_object,
size_t size) {
void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
bytes_to_next_step_ -= bytes_allocated;
if (bytes_to_next_step_ <= 0) {
Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
......@@ -2620,8 +2619,10 @@ class InlineAllocationObserver {
intptr_t bytes_to_next_step_;
private:
friend class LargeObjectSpace;
friend class NewSpace;
DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
friend class PagedSpace;
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
} // namespace internal
......
......@@ -23,7 +23,6 @@ IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
observer_(*this, kAllocatedThreshold),
......@@ -46,7 +45,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
incremental_marking_finalization_rounds_(0),
request_type_(COMPLETE_MARKING) {}
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
......@@ -549,7 +547,7 @@ void IncrementalMarking::Start(const char* reason) {
state_ = SWEEPING;
}
heap_->new_space()->AddInlineAllocationObserver(&observer_);
heap_->new_space()->AddAllocationObserver(&observer_);
incremental_marking_job()->Start(heap_);
}
......@@ -953,7 +951,7 @@ void IncrementalMarking::Stop() {
PrintF("[IncrementalMarking] Stopping.\n");
}
heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
heap_->new_space()->RemoveAllocationObserver(&observer_);
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
if (IsMarking()) {
......
......@@ -219,10 +219,10 @@ class IncrementalMarking {
}
private:
class Observer : public InlineAllocationObserver {
class Observer : public AllocationObserver {
public:
Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
: InlineAllocationObserver(step_size),
: AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override {
......
......@@ -492,12 +492,18 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
return alignment == kDoubleAligned
? AllocateRawAligned(size_in_bytes, kDoubleAligned)
: AllocateRawUnaligned(size_in_bytes);
AllocationResult result =
alignment == kDoubleAligned
? AllocateRawAligned(size_in_bytes, kDoubleAligned)
: AllocateRawUnaligned(size_in_bytes);
#else
return AllocateRawUnaligned(size_in_bytes);
AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
HeapObject* heap_obj = nullptr;
if (!result.IsRetry() && result.To(&heap_obj)) {
AllocationStep(heap_obj->address(), size_in_bytes);
}
return result;
}
......
......@@ -71,6 +71,20 @@ bool HeapObjectIterator::AdvanceToNextPage() {
return true;
}
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
AllSpaces spaces(heap_);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
space->PauseAllocationObservers();
}
}
PauseAllocationObserversScope::~PauseAllocationObserversScope() {
AllSpaces spaces(heap_);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
space->ResumeAllocationObservers();
}
}
// -----------------------------------------------------------------------------
// CodeRange
......@@ -961,6 +975,14 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
ObjectSpace::kObjectSpaceMapSpace);
void Space::AllocationStep(Address soon_object, int size) {
if (!allocation_observers_paused_) {
for (int i = 0; i < allocation_observers_->length(); ++i) {
AllocationObserver* o = (*allocation_observers_)[i];
o->AllocationStep(size, soon_object, size);
}
}
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
......@@ -1466,8 +1488,7 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
} else if (inline_allocation_observers_paused_ ||
top_on_previous_step_ == 0) {
} else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
// Normal limit is the end of the current page.
allocation_info_.set_limit(to_space_.page_high());
} else {
......@@ -1548,9 +1569,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
void NewSpace::StartNextInlineAllocationStep() {
if (!inline_allocation_observers_paused_) {
if (!allocation_observers_paused_) {
top_on_previous_step_ =
inline_allocation_observers_.length() ? allocation_info_.top() : 0;
allocation_observers_->length() ? allocation_info_.top() : 0;
UpdateInlineAllocationLimit(0);
}
}
......@@ -1558,44 +1579,36 @@ void NewSpace::StartNextInlineAllocationStep() {
intptr_t NewSpace::GetNextInlineAllocationStepSize() {
intptr_t next_step = 0;
for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
InlineAllocationObserver* o = inline_allocation_observers_[i];
for (int i = 0; i < allocation_observers_->length(); ++i) {
AllocationObserver* o = (*allocation_observers_)[i];
next_step = next_step ? Min(next_step, o->bytes_to_next_step())
: o->bytes_to_next_step();
}
DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
DCHECK(allocation_observers_->length() == 0 || next_step != 0);
return next_step;
}
void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
inline_allocation_observers_.Add(observer);
void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
Space::AddAllocationObserver(observer);
StartNextInlineAllocationStep();
}
void NewSpace::RemoveInlineAllocationObserver(
InlineAllocationObserver* observer) {
bool removed = inline_allocation_observers_.RemoveElement(observer);
// Only used in assertion. Suppress unused variable warning.
static_cast<void>(removed);
DCHECK(removed);
void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
Space::RemoveAllocationObserver(observer);
StartNextInlineAllocationStep();
}
void NewSpace::PauseInlineAllocationObservers() {
void NewSpace::PauseAllocationObservers() {
// Do a step to account for memory allocated so far.
InlineAllocationStep(top(), top(), nullptr, 0);
inline_allocation_observers_paused_ = true;
Space::PauseAllocationObservers();
top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
}
void NewSpace::ResumeInlineAllocationObservers() {
void NewSpace::ResumeAllocationObservers() {
DCHECK(top_on_previous_step_ == 0);
inline_allocation_observers_paused_ = false;
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
......@@ -1604,9 +1617,9 @@ void NewSpace::InlineAllocationStep(Address top, Address new_top,
Address soon_object, size_t size) {
if (top_on_previous_step_) {
int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
soon_object, size);
for (int i = 0; i < allocation_observers_->length(); ++i) {
(*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
size);
}
top_on_previous_step_ = new_top;
}
......@@ -2500,6 +2513,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == nullptr) return nullptr;
owner_->AllocationStep(new_node->address(), size_in_bytes);
int bytes_left = new_node_size - size_in_bytes;
DCHECK(bytes_left >= 0);
......@@ -3011,6 +3025,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
}
heap()->incremental_marking()->OldSpaceStep(object_size);
AllocationStep(object->address(), object_size);
return object;
}
......
......@@ -20,10 +20,10 @@ namespace v8 {
namespace internal {
class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class InlineAllocationObserver;
class Isolate;
class MemoryAllocator;
class MemoryChunk;
......@@ -902,7 +902,9 @@ class LargePage : public MemoryChunk {
class Space : public Malloced {
public:
Space(Heap* heap, AllocationSpace id, Executability executable)
: heap_(heap),
: allocation_observers_(new List<AllocationObserver*>()),
allocation_observers_paused_(false),
heap_(heap),
id_(id),
executable_(executable),
committed_(0),
......@@ -918,6 +920,26 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
virtual void AddAllocationObserver(AllocationObserver* observer) {
allocation_observers_->Add(observer);
}
virtual void RemoveAllocationObserver(AllocationObserver* observer) {
bool removed = allocation_observers_->RemoveElement(observer);
USE(removed);
DCHECK(removed);
}
virtual void PauseAllocationObservers() {
allocation_observers_paused_ = true;
}
virtual void ResumeAllocationObservers() {
allocation_observers_paused_ = false;
}
void AllocationStep(Address soon_object, int size);
// Return the total amount committed memory for this space, i.e., allocatable
// memory and page headers.
virtual intptr_t CommittedMemory() { return committed_; }
......@@ -964,6 +986,9 @@ class Space : public Malloced {
DCHECK_GE(committed_, 0);
}
v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
bool allocation_observers_paused_;
private:
Heap* heap_;
AllocationSpace id_;
......@@ -2485,8 +2510,7 @@ class NewSpace : public Space {
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
top_on_previous_step_(0),
inline_allocation_observers_paused_(false) {}
top_on_previous_step_(0) {}
inline bool Contains(HeapObject* o);
inline bool ContainsSlow(Address a);
......@@ -2636,21 +2660,25 @@ class NewSpace : public Space {
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
// When inline allocation stepping is active, either because of incremental
// marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
// inline allocation every once in a while. This is done by setting
// allocation_info_.limit to be lower than the actual limit and increasing
// it in steps to guarantee that the observers are notified periodically.
void UpdateInlineAllocationLimit(int size_in_bytes);
void DisableInlineAllocationSteps() {
top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
}
// Allows observation of inline allocation. The observer->Step() method gets
// called after every step_size bytes have been allocated (approximately).
// This works by adjusting the allocation limit to a lower value and adjusting
// it after each step.
void AddInlineAllocationObserver(InlineAllocationObserver* observer);
void AddAllocationObserver(AllocationObserver* observer) override;
// Removes a previously installed observer.
void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
void DisableInlineAllocationSteps() {
top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
}
void RemoveAllocationObserver(AllocationObserver* observer) override;
// Get the extent of the inactive semispace (for use as a marking stack,
// or to zap it). Notice: space-addresses are not necessarily on the
......@@ -2714,6 +2742,9 @@ class NewSpace : public Space {
SemiSpace* active_space() { return &to_space_; }
void PauseAllocationObservers() override;
void ResumeAllocationObservers() override;
private:
// Update allocation info to match the current to-space page.
void UpdateAllocationInfo();
......@@ -2736,14 +2767,7 @@ class NewSpace : public Space {
// mark-compact collection.
AllocationInfo allocation_info_;
// When inline allocation stepping is active, either because of incremental
// marking or because of idle scavenge, we 'interrupt' inline allocation every
// once in a while. This is done by setting allocation_info_.limit to be lower
// than the actual limit and increasing it in steps to guarantee that the
// observers are notified periodically.
List<InlineAllocationObserver*> inline_allocation_observers_;
Address top_on_previous_step_;
bool inline_allocation_observers_paused_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
......@@ -2760,26 +2784,18 @@ class NewSpace : public Space {
size_t size);
intptr_t GetNextInlineAllocationStepSize();
void StartNextInlineAllocationStep();
void PauseInlineAllocationObservers();
void ResumeInlineAllocationObservers();
friend class PauseInlineAllocationObserversScope;
friend class SemiSpaceIterator;
};
class PauseInlineAllocationObserversScope {
class PauseAllocationObserversScope {
public:
explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
: new_space_(new_space) {
new_space_->PauseInlineAllocationObservers();
}
~PauseInlineAllocationObserversScope() {
new_space_->ResumeInlineAllocationObservers();
}
explicit PauseAllocationObserversScope(Heap* heap);
~PauseAllocationObserversScope();
private:
NewSpace* new_space_;
DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};
// -----------------------------------------------------------------------------
......
......@@ -16,23 +16,52 @@
namespace v8 {
namespace internal {
// We sample with a Poisson process, with constant average sampling interval.
// This follows the exponential probability distribution with parameter
// λ = 1/rate where rate is the average number of bytes between samples.
//
// Let u be a uniformly distributed random number between 0 and 1, then
// next_sample = (- ln u) / λ
intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
if (FLAG_sampling_heap_profiler_suppress_randomness) {
return rate;
}
double u = random_->NextDouble();
double next = (-std::log(u)) * rate;
return next < kPointerSize
? kPointerSize
: (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
}
SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
uint64_t rate, int stack_depth)
: InlineAllocationObserver(GetNextSampleInterval(
heap->isolate()->random_number_generator(), rate)),
isolate_(heap->isolate()),
: isolate_(heap->isolate()),
heap_(heap),
random_(isolate_->random_number_generator()),
new_space_observer_(new SamplingAllocationObserver(
heap_, rate, rate, this, heap->isolate()->random_number_generator())),
other_spaces_observer_(new SamplingAllocationObserver(
heap_, rate, rate, this, heap->isolate()->random_number_generator())),
names_(names),
samples_(),
rate_(rate),
stack_depth_(stack_depth) {
heap->new_space()->AddInlineAllocationObserver(this);
heap->new_space()->AddAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
if (space != heap->new_space()) {
space->AddAllocationObserver(other_spaces_observer_.get());
}
}
}
SamplingHeapProfiler::~SamplingHeapProfiler() {
heap_->new_space()->RemoveInlineAllocationObserver(this);
heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap_);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
if (space != heap_->new_space()) {
space->RemoveAllocationObserver(other_spaces_observer_.get());
}
}
// Clear samples and drop all the weak references we are keeping.
std::set<SampledAllocation*>::iterator it;
......@@ -43,13 +72,6 @@ SamplingHeapProfiler::~SamplingHeapProfiler() {
samples_.swap(empty);
}
void SamplingHeapProfiler::Step(int bytes_allocated, Address soon_object,
size_t size) {
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
DCHECK(soon_object);
SampleObject(soon_object, size);
}
void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
DisallowHeapAllocation no_allocation;
......@@ -70,25 +92,6 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
}
// We sample with a Poisson process, with constant average sampling interval.
// This follows the exponential probability distribution with parameter
// λ = 1/rate where rate is the average number of bytes between samples.
//
// Let u be a uniformly distributed random number between 0 and 1, then
// next_sample = (- ln u) / λ
intptr_t SamplingHeapProfiler::GetNextSampleInterval(
base::RandomNumberGenerator* random, uint64_t rate) {
if (FLAG_sampling_heap_profiler_suppress_randomness) {
return rate;
}
double u = random->NextDouble();
double next = (-std::log(u)) * rate;
return next < kPointerSize
? kPointerSize
: (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
}
void SamplingHeapProfiler::SampledAllocation::OnWeakCallback(
const WeakCallbackInfo<SampledAllocation>& data) {
SampledAllocation* sample = data.GetParameter();
......@@ -159,8 +162,7 @@ SamplingHeapProfiler::SampledAllocation::SampledAllocation(
}
}
SamplingHeapProfiler::Node* SamplingHeapProfiler::AllocateNode(
v8::AllocationProfile::Node* SamplingHeapProfiler::AllocateNode(
AllocationProfile* profile, const std::map<int, Script*>& scripts,
FunctionInfo* function_info) {
DCHECK(function_info->get_name());
......@@ -180,37 +182,36 @@ SamplingHeapProfiler::Node* SamplingHeapProfiler::AllocateNode(
function_info->get_start_position());
}
profile->nodes().push_back(
Node({ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(
function_info->get_name())),
ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(
function_info->get_script_name())),
function_info->get_script_id(), function_info->get_start_position(),
line, column, std::vector<Node*>(),
std::vector<v8::AllocationProfile::Allocation>()}));
profile->nodes().push_back(v8::AllocationProfile::Node(
{ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(
function_info->get_name())),
ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(
function_info->get_script_name())),
function_info->get_script_id(), function_info->get_start_position(),
line, column, std::vector<v8::AllocationProfile::Node*>(),
std::vector<v8::AllocationProfile::Allocation>()}));
return &profile->nodes().back();
}
SamplingHeapProfiler::Node* SamplingHeapProfiler::FindOrAddChildNode(
v8::AllocationProfile::Node* SamplingHeapProfiler::FindOrAddChildNode(
AllocationProfile* profile, const std::map<int, Script*>& scripts,
Node* parent, FunctionInfo* function_info) {
for (Node* child : parent->children) {
v8::AllocationProfile::Node* parent, FunctionInfo* function_info) {
for (v8::AllocationProfile::Node* child : parent->children) {
if (child->script_id == function_info->get_script_id() &&
child->start_position == function_info->get_start_position())
return child;
}
Node* child = AllocateNode(profile, scripts, function_info);
v8::AllocationProfile::Node* child =
AllocateNode(profile, scripts, function_info);
parent->children.push_back(child);
return child;
}
SamplingHeapProfiler::Node* SamplingHeapProfiler::AddStack(
v8::AllocationProfile::Node* SamplingHeapProfiler::AddStack(
AllocationProfile* profile, const std::map<int, Script*>& scripts,
const std::vector<FunctionInfo*>& stack) {
Node* node = profile->GetRootNode();
v8::AllocationProfile::Node* node = profile->GetRootNode();
// We need to process the stack in reverse order as the top of the stack is
// the first element in the list.
......@@ -241,7 +242,8 @@ v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
AllocateNode(profile, scripts, &function_info);
for (SampledAllocation* allocation : samples_) {
Node* node = AddStack(profile, scripts, allocation->get_stack());
v8::AllocationProfile::Node* node =
AddStack(profile, scripts, allocation->get_stack());
node->allocations.push_back({allocation->get_size(), 1});
}
......
......@@ -20,24 +20,25 @@ class RandomNumberGenerator;
namespace internal {
class SamplingAllocationObserver;
class AllocationProfile : public v8::AllocationProfile {
public:
AllocationProfile() : nodes_() {}
Node* GetRootNode() override {
v8::AllocationProfile::Node* GetRootNode() override {
return nodes_.size() == 0 ? nullptr : &nodes_.front();
}
std::deque<Node>& nodes() { return nodes_; }
std::deque<v8::AllocationProfile::Node>& nodes() { return nodes_; }
private:
std::deque<Node> nodes_;
std::deque<v8::AllocationProfile::Node> nodes_;
DISALLOW_COPY_AND_ASSIGN(AllocationProfile);
};
class SamplingHeapProfiler : public InlineAllocationObserver {
class SamplingHeapProfiler {
public:
SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
int stack_depth);
......@@ -45,11 +46,6 @@ class SamplingHeapProfiler : public InlineAllocationObserver {
v8::AllocationProfile* GetAllocationProfile();
void Step(int bytes_allocated, Address soon_object, size_t size) override;
intptr_t GetNextStepSize() override {
return GetNextSampleInterval(random_, rate_);
}
StringsStorage* names() const { return names_; }
class FunctionInfo {
......@@ -99,35 +95,61 @@ class SamplingHeapProfiler : public InlineAllocationObserver {
};
private:
using Node = v8::AllocationProfile::Node;
Heap* heap() const { return heap_; }
void SampleObject(Address soon_object, size_t size);
static intptr_t GetNextSampleInterval(base::RandomNumberGenerator* random,
uint64_t rate);
// Methods that construct v8::AllocationProfile.
Node* AddStack(AllocationProfile* profile,
const std::map<int, Script*>& scripts,
const std::vector<FunctionInfo*>& stack);
Node* FindOrAddChildNode(AllocationProfile* profile,
const std::map<int, Script*>& scripts, Node* parent,
FunctionInfo* function_info);
Node* AllocateNode(AllocationProfile* profile,
const std::map<int, Script*>& scripts,
FunctionInfo* function_info);
v8::AllocationProfile::Node* AddStack(
AllocationProfile* profile, const std::map<int, Script*>& scripts,
const std::vector<FunctionInfo*>& stack);
v8::AllocationProfile::Node* FindOrAddChildNode(
AllocationProfile* profile, const std::map<int, Script*>& scripts,
v8::AllocationProfile::Node* parent, FunctionInfo* function_info);
v8::AllocationProfile::Node* AllocateNode(
AllocationProfile* profile, const std::map<int, Script*>& scripts,
FunctionInfo* function_info);
Isolate* const isolate_;
Heap* const heap_;
base::RandomNumberGenerator* const random_;
base::SmartPointer<SamplingAllocationObserver> new_space_observer_;
base::SmartPointer<SamplingAllocationObserver> other_spaces_observer_;
StringsStorage* const names_;
std::set<SampledAllocation*> samples_;
const uint64_t rate_;
const int stack_depth_;
friend class SamplingAllocationObserver;
};
class SamplingAllocationObserver : public AllocationObserver {
public:
SamplingAllocationObserver(Heap* heap, intptr_t step_size, uint64_t rate,
SamplingHeapProfiler* profiler,
base::RandomNumberGenerator* random)
: AllocationObserver(step_size),
profiler_(profiler),
heap_(heap),
random_(random),
rate_(rate) {}
virtual ~SamplingAllocationObserver() {}
protected:
void Step(int bytes_allocated, Address soon_object, size_t size) override {
USE(heap_);
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
DCHECK(soon_object);
profiler_->SampleObject(soon_object, size);
}
intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
private:
intptr_t GetNextSampleInterval(uint64_t rate);
SamplingHeapProfiler* const profiler_;
Heap* const heap_;
base::RandomNumberGenerator* const random_;
uint64_t const rate_;
};
} // namespace internal
} // namespace v8
......
......@@ -523,10 +523,27 @@ static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
return filler;
}
class Observer : public InlineAllocationObserver {
static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
CHECK(!allocation.IsRetry());
HeapObject* filler = NULL;
CHECK(allocation.To(&filler));
space->heap()->CreateFillerObjectAt(filler->address(), size);
return filler;
}
static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
CHECK(!allocation.IsRetry());
HeapObject* filler = NULL;
CHECK(allocation.To(&filler));
return filler;
}
class Observer : public AllocationObserver {
public:
explicit Observer(intptr_t step_size)
: InlineAllocationObserver(step_size), count_(0) {}
: AllocationObserver(step_size), count_(0) {}
void Step(int bytes_allocated, Address, size_t) override { count_++; }
......@@ -536,85 +553,93 @@ class Observer : public InlineAllocationObserver {
int count_;
};
template <typename T>
void testAllocationObserver(Isolate* i_isolate, T* space) {
Observer observer1(128);
space->AddAllocationObserver(&observer1);
UNINITIALIZED_TEST(InlineAllocationObserver) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// The observer should not get notified if we have only allocated less than
// 128 bytes.
AllocateUnaligned(space, 64);
CHECK_EQ(observer1.count(), 0);
NewSpace* new_space = i_isolate->heap()->new_space();
// The observer should get called when we have allocated exactly 128 bytes.
AllocateUnaligned(space, 64);
CHECK_EQ(observer1.count(), 1);
Observer observer1(128);
new_space->AddInlineAllocationObserver(&observer1);
// Another >128 bytes should get another notification.
AllocateUnaligned(space, 136);
CHECK_EQ(observer1.count(), 2);
// The observer should not get notified if we have only allocated less than
// 128 bytes.
AllocateUnaligned(new_space, 64);
CHECK_EQ(observer1.count(), 0);
// Allocating a large object should get only one notification.
AllocateUnaligned(space, 1024);
CHECK_EQ(observer1.count(), 3);
// The observer should get called when we have allocated exactly 128 bytes.
AllocateUnaligned(new_space, 64);
CHECK_EQ(observer1.count(), 1);
// Allocating another 2048 bytes in small objects should get 16
// notifications.
for (int i = 0; i < 64; ++i) {
AllocateUnaligned(space, 32);
}
CHECK_EQ(observer1.count(), 19);
// Another >128 bytes should get another notification.
AllocateUnaligned(new_space, 136);
CHECK_EQ(observer1.count(), 2);
// Multiple observers should work.
Observer observer2(96);
space->AddAllocationObserver(&observer2);
// Allocating a large object should get only one notification.
AllocateUnaligned(new_space, 1024);
CHECK_EQ(observer1.count(), 3);
AllocateUnaligned(space, 2048);
CHECK_EQ(observer1.count(), 20);
CHECK_EQ(observer2.count(), 1);
// Allocating another 2048 bytes in small objects should get 16
// notifications.
for (int i = 0; i < 64; ++i) {
AllocateUnaligned(new_space, 32);
}
CHECK_EQ(observer1.count(), 19);
AllocateUnaligned(space, 104);
CHECK_EQ(observer1.count(), 20);
CHECK_EQ(observer2.count(), 2);
// Multiple observers should work.
Observer observer2(96);
new_space->AddInlineAllocationObserver(&observer2);
// Callback should stop getting called after an observer is removed.
space->RemoveAllocationObserver(&observer1);
AllocateUnaligned(new_space, 2048);
CHECK_EQ(observer1.count(), 20);
CHECK_EQ(observer2.count(), 1);
AllocateUnaligned(space, 384);
CHECK_EQ(observer1.count(), 20); // no more notifications.
CHECK_EQ(observer2.count(), 3); // this one is still active.
AllocateUnaligned(new_space, 104);
CHECK_EQ(observer1.count(), 20);
CHECK_EQ(observer2.count(), 2);
// Ensure that PauseAllocationObserversScope works correctly.
AllocateUnaligned(space, 48);
CHECK_EQ(observer2.count(), 3);
{
PauseAllocationObserversScope pause_observers(i_isolate->heap());
CHECK_EQ(observer2.count(), 3);
AllocateUnaligned(space, 384);
CHECK_EQ(observer2.count(), 3);
}
CHECK_EQ(observer2.count(), 3);
// Coupled with the 48 bytes allocated before the pause, another 48 bytes
// allocated here should trigger a notification.
AllocateUnaligned(space, 48);
CHECK_EQ(observer2.count(), 4);
space->RemoveAllocationObserver(&observer2);
AllocateUnaligned(space, 384);
CHECK_EQ(observer1.count(), 20);
CHECK_EQ(observer2.count(), 4);
}
// Callback should stop getting called after an observer is removed.
new_space->RemoveInlineAllocationObserver(&observer1);
UNINITIALIZED_TEST(AllocationObserver) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
AllocateUnaligned(new_space, 384);
CHECK_EQ(observer1.count(), 20); // no more notifications.
CHECK_EQ(observer2.count(), 3); // this one is still active.
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Ensure that PauseInlineAllocationObserversScope works correctly.
AllocateUnaligned(new_space, 48);
CHECK_EQ(observer2.count(), 3);
{
PauseInlineAllocationObserversScope pause_observers(new_space);
CHECK_EQ(observer2.count(), 3);
AllocateUnaligned(new_space, 384);
CHECK_EQ(observer2.count(), 3);
}
CHECK_EQ(observer2.count(), 3);
// Coupled with the 48 bytes allocated before the pause, another 48 bytes
// allocated here should trigger a notification.
AllocateUnaligned(new_space, 48);
CHECK_EQ(observer2.count(), 4);
new_space->RemoveInlineAllocationObserver(&observer2);
AllocateUnaligned(new_space, 384);
CHECK_EQ(observer1.count(), 20);
CHECK_EQ(observer2.count(), 4);
testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
// Old space is used but the code path is shared for all
// classes inheriting from PagedSpace.
testAllocationObserver<PagedSpace>(i_isolate,
i_isolate->heap()->old_space());
testAllocationObserver<LargeObjectSpace>(i_isolate,
i_isolate->heap()->lo_space());
}
isolate->Dispose();
}
......@@ -634,16 +659,16 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
NewSpace* new_space = i_isolate->heap()->new_space();
Observer observer1(512);
new_space->AddInlineAllocationObserver(&observer1);
new_space->AddAllocationObserver(&observer1);
Observer observer2(576);
new_space->AddInlineAllocationObserver(&observer2);
new_space->AddAllocationObserver(&observer2);
for (int i = 0; i < 512; ++i) {
AllocateUnaligned(new_space, 32);
}
new_space->RemoveInlineAllocationObserver(&observer1);
new_space->RemoveInlineAllocationObserver(&observer2);
new_space->RemoveAllocationObserver(&observer1);
new_space->RemoveAllocationObserver(&observer2);
CHECK_EQ(observer1.count(), 32);
CHECK_EQ(observer2.count(), 28);
......