Commit 258f270f authored by Ali Ijaz Sheikh, committed by Commit Bot

Revert "[profiler] proper observation of old space inline allocations"

This reverts commit 672a41c3.

Reason for revert: Linux64 TSAN bot failures

Original change's description:
> [profiler] proper observation of old space inline allocations
> 
> Bug: chromium:633920
> Change-Id: I9a2f4a89f6b9c0f63cb3b166b06a88a12f0a203c
> Reviewed-on: https://chromium-review.googlesource.com/631696
> Commit-Queue: Ali Ijaz Sheikh <ofrobots@google.com>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#48043}

TBR=ulan@chromium.org,mlippautz@chromium.org,ofrobots@google.com

Change-Id: Ib71baf69b29b067fa0ba76027170054b8faa78d3
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:633920
Reviewed-on: https://chromium-review.googlesource.com/669559
Reviewed-by: Ali Ijaz Sheikh <ofrobots@google.com>
Commit-Queue: Ali Ijaz Sheikh <ofrobots@google.com>
Cr-Commit-Position: refs/heads/master@{#48052}
parent 2b252275
@@ -369,11 +369,6 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
 
 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                          AllocationAlignment alignment) {
-  DCHECK(top() >= top_on_previous_step_);
-  size_t bytes_since_last =
-      top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
-  DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
 #ifdef V8_HOST_ARCH_32_BIT
   AllocationResult result =
       alignment == kDoubleAligned
@@ -383,13 +378,11 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   AllocationResult result = AllocateRawUnaligned(size_in_bytes);
 #endif
   HeapObject* heap_obj = nullptr;
-  if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
-    AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
-                   heap_obj->address(), size_in_bytes);
+  if (!result.IsRetry() && result.To(&heap_obj)) {
+    AllocationStep(heap_obj->address(), size_in_bytes);
     DCHECK_IMPLIES(
         heap()->incremental_marking()->black_allocation(),
         heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
-    StartNextInlineAllocationStep();
   }
   return result;
 }
...
@@ -1328,7 +1328,6 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
 
 void Space::AddAllocationObserver(AllocationObserver* observer) {
   allocation_observers_.push_back(observer);
-  StartNextInlineAllocationStep();
 }
 
 void Space::RemoveAllocationObserver(AllocationObserver* observer) {
@@ -1336,7 +1335,6 @@ void Space::RemoveAllocationObserver(AllocationObserver* observer) {
                       allocation_observers_.end(), observer);
   DCHECK(allocation_observers_.end() != it);
   allocation_observers_.erase(it);
-  StartNextInlineAllocationStep();
 }
 
 void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
@@ -1345,12 +1343,11 @@ void Space::ResumeAllocationObservers() {
   allocation_observers_paused_ = false;
 }
 
-void Space::AllocationStep(int bytes_since_last, Address soon_object,
-                           int size) {
+void Space::AllocationStep(Address soon_object, int size) {
   if (!allocation_observers_paused_) {
     heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
     for (AllocationObserver* observer : allocation_observers_) {
-      observer->AllocationStep(bytes_since_last, soon_object, size);
+      observer->AllocationStep(size, soon_object, size);
     }
   }
 }
@@ -1370,8 +1367,7 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
     : Space(heap, space, executable),
      anchor_(this),
      free_list_(this),
-      locked_page_(nullptr),
-      top_on_previous_step_(0) {
+      locked_page_(nullptr) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
@@ -1600,48 +1596,6 @@ void PagedSpace::SetAllocationInfo(Address top, Address limit) {
   }
 }
 
-void PagedSpace::DecreaseLimit(Address new_limit) {
-  Address old_limit = limit();
-  DCHECK_LE(top(), new_limit);
-  DCHECK_GE(old_limit, new_limit);
-  if (new_limit != old_limit) {
-    SetTopAndLimit(top(), new_limit);
-    Free(new_limit, old_limit - new_limit);
-    if (heap()->incremental_marking()->black_allocation()) {
-      Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
-                                                                   old_limit);
-    }
-  }
-}
-
-Address PagedSpace::ComputeLimit(Address start, Address end,
-                                 size_t size_in_bytes) {
-  DCHECK_GE(end - start, size_in_bytes);
-
-  if (heap()->inline_allocation_disabled()) {
-    // Keep the linear allocation area to fit exactly the requested size.
-    return start + size_in_bytes;
-  } else if (!allocation_observers_paused_ && !allocation_observers_.empty() &&
-             identity() == OLD_SPACE && !is_local()) {
-    // Generated code may allocate inline from the linear allocation area for
-    // Old Space. To make sure we can observe these allocations, we use a lower
-    // limit.
-    size_t step = RoundSizeDownToObjectAlignment(
-        static_cast<int>(GetNextInlineAllocationStepSize()));
-    return Max(start + size_in_bytes, Min(start + step, end));
-  } else {
-    // The entire node can be used as the linear allocation area.
-    return end;
-  }
-}
-
-void PagedSpace::StartNextInlineAllocationStep() {
-  if (!allocation_observers_paused_ && SupportsInlineAllocation()) {
-    top_on_previous_step_ = allocation_observers_.empty() ? 0 : top();
-    DecreaseLimit(ComputeLimit(top(), limit(), 0));
-  }
-}
-
 void PagedSpace::MarkAllocationInfoBlack() {
   DCHECK(heap()->incremental_marking()->black_allocation());
   Address current_top = top();
@@ -1687,12 +1641,6 @@ void PagedSpace::EmptyAllocationInfo() {
     }
   }
 
-  if (top_on_previous_step_) {
-    DCHECK(current_top >= top_on_previous_step_);
-    AllocationStep(static_cast<int>(current_top - top_on_previous_step_),
-                   nullptr, 0);
-    top_on_previous_step_ = 0;
-  }
   SetTopAndLimit(NULL, NULL);
   DCHECK_GE(current_limit, current_top);
   Free(current_top, current_limit - current_top);
@@ -2135,6 +2083,16 @@ void NewSpace::StartNextInlineAllocationStep() {
   }
 }
 
+void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
+  Space::AddAllocationObserver(observer);
+  StartNextInlineAllocationStep();
+}
+
+void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
+  Space::RemoveAllocationObserver(observer);
+  StartNextInlineAllocationStep();
+}
+
 void NewSpace::PauseAllocationObservers() {
   // Do a step to account for memory allocated so far.
   InlineAllocationStep(top(), top(), nullptr, 0);
@@ -2143,28 +2101,12 @@ void NewSpace::PauseAllocationObservers() {
   UpdateInlineAllocationLimit(0);
 }
 
-void PagedSpace::PauseAllocationObservers() {
-  // Do a step to account for memory allocated so far.
-  if (top_on_previous_step_) {
-    int bytes_allocated = static_cast<int>(top() - top_on_previous_step_);
-    AllocationStep(bytes_allocated, nullptr, 0);
-  }
-  Space::PauseAllocationObservers();
-  top_on_previous_step_ = 0;
-}
-
 void NewSpace::ResumeAllocationObservers() {
   DCHECK(top_on_previous_step_ == 0);
   Space::ResumeAllocationObservers();
   StartNextInlineAllocationStep();
 }
 
-// TODO(ofrobots): refactor into SpaceWithLinearArea
-void PagedSpace::ResumeAllocationObservers() {
-  DCHECK(top_on_previous_step_ == 0);
-  Space::ResumeAllocationObservers();
-  StartNextInlineAllocationStep();
-}
-
 void NewSpace::InlineAllocationStep(Address top, Address new_top,
                                     Address soon_object, size_t size) {
@@ -2939,6 +2881,7 @@ bool FreeList::Allocate(size_t size_in_bytes) {
 
   if (new_node == nullptr) return false;
   DCHECK_GE(new_node_size, size_in_bytes);
+  size_t bytes_left = new_node_size - size_in_bytes;
 
 #ifdef DEBUG
   for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
@@ -2952,21 +2895,38 @@ bool FreeList::Allocate(size_t size_in_bytes) {
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
 
+  const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
+
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
   owner_->IncreaseAllocatedBytes(new_node_size,
                                  Page::FromAddress(new_node->address()));
 
-  Address start = new_node->address();
-  Address end = new_node->address() + new_node_size;
-  Address limit = owner_->ComputeLimit(start, end, size_in_bytes);
-  DCHECK_LE(limit, end);
-  DCHECK_LE(size_in_bytes, limit - start);
-  if (limit != end) {
-    owner_->Free(limit, end - limit);
+  if (owner_->heap()->inline_allocation_disabled()) {
+    // Keep the linear allocation area to fit exactly the requested size.
+    // Return the rest to the free list.
+    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
+    owner_->SetAllocationInfo(new_node->address(),
+                              new_node->address() + size_in_bytes);
+  } else if (bytes_left > kThreshold &&
+             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+             FLAG_incremental_marking &&
+             !owner_->is_local()) {  // Not needed on CompactionSpaces.
+    size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+    // We don't want to give too large linear areas to the allocator while
+    // incremental marking is going on, because we won't check again whether
+    // we want to do another increment until the linear area is used up.
+    DCHECK_GE(new_node_size, size_in_bytes + linear_size);
+    owner_->Free(new_node->address() + size_in_bytes + linear_size,
+                 new_node_size - size_in_bytes - linear_size);
+    owner_->SetAllocationInfo(
+        new_node->address(), new_node->address() + size_in_bytes + linear_size);
+  } else {
+    // Normally we give the rest of the node to the allocator as its new
+    // linear allocation area.
+    owner_->SetAllocationInfo(new_node->address(),
+                              new_node->address() + new_node_size);
   }
-  owner_->SetAllocationInfo(start, limit);
 
   return true;
 }
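For orientation, the restored branch structure in FreeList::Allocate above decides how much of the free-list node that was found becomes the new linear allocation area and how much goes straight back to the free list. The following standalone sketch (plain C++ with assumed example values; the alignment rounding is simplified to using kThreshold directly, and none of these names are V8 APIs) walks through the same three cases:

// Standalone sketch of the restored node-splitting policy; the constants are
// assumptions chosen only for illustration.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kThreshold = 64 * 1024;      // stand-in for IncrementalMarking::kAllocatedThreshold
  const size_t new_node_size = 256 * 1024;  // size of the free-list node that was found
  const size_t size_in_bytes = 128;         // requested allocation size
  const bool inline_allocation_disabled = false;
  const bool incremental_marking_incomplete = true;

  const size_t bytes_left = new_node_size - size_in_bytes;
  size_t linear_area;  // how much of the node becomes the [top, limit) area
  if (inline_allocation_disabled) {
    // Case 1: the linear area fits the request exactly; the rest is freed.
    linear_area = size_in_bytes;
  } else if (bytes_left > kThreshold && incremental_marking_incomplete) {
    // Case 2: cap the linear area so incremental marking regains control soon.
    linear_area = size_in_bytes + kThreshold;
  } else {
    // Case 3: the whole node becomes the linear allocation area.
    linear_area = new_node_size;
  }
  std::printf("linear area: %zu bytes, returned to free list: %zu bytes\n",
              linear_area, new_node_size - linear_area);
  return 0;
}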
@@ -3354,7 +3314,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   if (heap()->incremental_marking()->black_allocation()) {
     heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
   }
-  AllocationStep(object_size, object->address(), object_size);
+  AllocationStep(object->address(), object_size);
   DCHECK_IMPLIES(
       heap()->incremental_marking()->black_allocation(),
       heap()->incremental_marking()->marking_state()->IsBlack(object));
...
@@ -903,17 +903,17 @@ class Space : public Malloced {
   // Identity used in error reporting.
   AllocationSpace identity() { return id_; }
 
-  void AddAllocationObserver(AllocationObserver* observer);
+  V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
+      AllocationObserver* observer);
 
-  void RemoveAllocationObserver(AllocationObserver* observer);
+  V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
+      AllocationObserver* observer);
 
   V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
 
   V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
 
-  V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
-
-  void AllocationStep(int bytes_since_last, Address soon_object, int size);
+  void AllocationStep(Address soon_object, int size);
 
   // Return the total amount committed memory for this space, i.e., allocatable
   // memory and page headers.
@@ -2071,8 +2071,15 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   void ResetFreeList() { free_list_.Reset(); }
 
-  void PauseAllocationObservers() override;
-  void ResumeAllocationObservers() override;
+  // Set space allocation info.
+  void SetTopAndLimit(Address top, Address limit) {
+    DCHECK(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.Reset(top, limit);
+  }
+
+  void SetAllocationInfo(Address top, Address limit);
 
   // Empty space allocation info, returning unused area to free list.
   void EmptyAllocationInfo();
@@ -2177,21 +2184,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   // multiple tasks hold locks on pages while trying to sweep each others pages.
   void AnnounceLockedPage(Page* page) { locked_page_ = page; }
 
-  Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
-  void SetAllocationInfo(Address top, Address limit);
-
- private:
-  // Set space allocation info.
-  void SetTopAndLimit(Address top, Address limit) {
-    DCHECK(top == limit ||
-           Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-    allocation_info_.Reset(top, limit);
-  }
-  void DecreaseLimit(Address new_limit);
-  void StartNextInlineAllocationStep() override;
-  bool SupportsInlineAllocation() { return identity() == OLD_SPACE; }
-
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2254,7 +2246,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   base::Mutex space_mutex_;
 
   Page* locked_page_;
-  Address top_on_previous_step_;
 
   friend class IncrementalMarking;
   friend class MarkCompactCollector;
@@ -2656,6 +2647,14 @@ class NewSpace : public Space {
     UpdateInlineAllocationLimit(0);
   }
 
+  // Allows observation of inline allocation. The observer->Step() method gets
+  // called after every step_size bytes have been allocated (approximately).
+  // This works by adjusting the allocation limit to a lower value and adjusting
+  // it after each step.
+  void AddAllocationObserver(AllocationObserver* observer) override;
+
+  void RemoveAllocationObserver(AllocationObserver* observer) override;
+
   // Get the extent of the inactive semispace (for use as a marking stack,
   // or to zap it). Notice: space-addresses are not necessarily on the
   // same page, so FromSpaceStart() might be above FromSpaceEnd().
@@ -2762,7 +2761,7 @@ class NewSpace : public Space {
   // different when we cross a page boundary or reset the space.
   void InlineAllocationStep(Address top, Address new_top, Address soon_object,
                             size_t size);
-  void StartNextInlineAllocationStep() override;
+  void StartNextInlineAllocationStep();
 
   friend class SemiSpaceIterator;
 };
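The comment restored in the NewSpace hunk above describes the observation mechanism: the allocation limit is kept artificially low so that bump-pointer allocation traps roughly every step_size bytes, at which point each registered observer's Step() runs and the next step is set up. As a rough, self-contained model of that idea (plain C++; ModelSpace, Observer, PrintingObserver and the constants below are illustrative stand-ins, not V8 classes), it could look like this:

// Toy model of limit-based allocation observation; not V8 code.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr size_t kAreaEnd = 1 << 20;  // pretend the space is 1 MB starting at 0

struct Observer {
  explicit Observer(size_t step) : step_size(step) {}
  virtual ~Observer() = default;
  virtual void Step(size_t bytes_since_last) = 0;
  size_t step_size;
};

class ModelSpace {
 public:
  void AddObserver(Observer* o) {
    observers_.push_back(o);
    LowerLimit();  // analogous to starting the next inline allocation step
  }

  // Bump-pointer allocation: when the lowered limit is hit, notify observers
  // and set up the next step before handing out the memory.
  size_t Allocate(size_t size) {
    if (top_ + size > limit_) {
      for (Observer* o : observers_) o->Step(top_ - top_on_previous_step_);
      top_on_previous_step_ = top_;
      LowerLimit();
    }
    size_t address = top_;
    top_ += size;
    return address;
  }

 private:
  void LowerLimit() {
    size_t step = kAreaEnd;  // no observers: use the whole area
    for (Observer* o : observers_) step = std::min(step, o->step_size);
    limit_ = std::min(kAreaEnd, top_ + step);
  }
  size_t top_ = 0;
  size_t limit_ = kAreaEnd;
  size_t top_on_previous_step_ = 0;
  std::vector<Observer*> observers_;
};

struct PrintingObserver : Observer {
  PrintingObserver() : Observer(4096) {}
  void Step(size_t bytes_since_last) override {
    std::printf("observed ~%zu bytes since the previous step\n", bytes_since_last);
  }
};

int main() {
  ModelSpace space;
  PrintingObserver obs;
  space.AddObserver(&obs);
  for (int i = 0; i < 100; ++i) space.Allocate(128);  // Step() fires every ~4 KB
  return 0;
}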
...
@@ -172,11 +172,8 @@ class SamplingAllocationObserver : public AllocationObserver {
   void Step(int bytes_allocated, Address soon_object, size_t size) override {
     USE(heap_);
     DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
-    if (soon_object) {
-      // TODO(ofrobots): it would be better to sample the next object rather
-      // than skipping this sample epoch if soon_object happens to be null.
-      profiler_->SampleObject(soon_object, size);
-    }
+    DCHECK(soon_object);
+    profiler_->SampleObject(soon_object, size);
   }
 
   intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
...
@@ -203,7 +203,7 @@ void ForceEvacuationCandidate(Page* page) {
     int remaining = static_cast<int>(limit - top);
     space->heap()->CreateFillerObjectAt(top, remaining,
                                         ClearRecordedSlots::kNo);
-    space->EmptyAllocationInfo();
+    space->SetTopAndLimit(nullptr, nullptr);
   }
 }
...
@@ -3063,77 +3063,3 @@ TEST(SamplingHeapProfilerLeftTrimming) {
 
   heap_profiler->StopSamplingHeapProfiler();
 }
-
-TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
-  i::FLAG_allow_natives_syntax = true;
-  i::FLAG_expose_gc = true;
-
-  CcTest::InitializeVM();
-  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
-  if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
-      i::FLAG_stress_incremental_marking) {
-    return;
-  }
-
-  v8::HandleScope scope(v8::Isolate::GetCurrent());
-  LocalContext env;
-  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
-
-  // Suppress randomness to avoid flakiness in tests.
-  v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
-
-  // Grow new space until maximum capacity reached.
-  while (!CcTest::heap()->new_space()->IsAtMaximumCapacity()) {
-    CcTest::heap()->new_space()->Grow();
-  }
-
-  i::ScopedVector<char> source(1024);
-  i::SNPrintF(source,
-              "var number_elements = %d;"
-              "var elements = new Array(number_elements);"
-              "function f() {"
-              "  for (var i = 0; i < number_elements; i++) {"
-              "    elements[i] = [{}, {}, {}];"
-              "  }"
-              "  return elements[number_elements - 1];"
-              "};"
-              "f(); gc();"
-              "f(); f();"
-              "%%OptimizeFunctionOnNextCall(f);"
-              "f();"
-              "f;",
-              i::AllocationSite::kPretenureMinimumCreated + 1);
-
-  v8::Local<v8::Function> f =
-      v8::Local<v8::Function>::Cast(CompileRun(source.start()));
-
-  // Make sure the function is producing pre-tenured objects.
-  auto res = f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
-  i::Handle<i::JSObject> o = i::Handle<i::JSObject>::cast(
-      v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
-  CHECK(CcTest::heap()->InOldSpace(o->elements()));
-  CHECK(CcTest::heap()->InOldSpace(*o));
-
-  // Call the function and profile it.
-  heap_profiler->StartSamplingHeapProfiler(64);
-  for (int i = 0; i < 100; ++i) {
-    f->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
-  }
-
-  std::unique_ptr<v8::AllocationProfile> profile(
-      heap_profiler->GetAllocationProfile());
-  CHECK(profile);
-  heap_profiler->StopSamplingHeapProfiler();
-
-  const char* names[] = {"f"};
-  auto node_f = FindAllocationProfileNode(env->GetIsolate(), *profile,
-                                          ArrayVector(names));
-  CHECK(node_f);
-
-  int count = 0;
-  for (auto allocation : node_f->allocations) {
-    count += allocation.count;
-  }
-
-  CHECK_GE(count, 9000);
-}