Commit f32972f8 authored by Dominik Inführ, committed by Commit Bot

[heap] Add new interface to AllocationCounter

Add methods NotifyBytes(), NotifyObject() and NextBytes() to
AllocationCounter. These methods are unused for now.

Move AllocationObserver::Step after AllocationCounter methods as well.
Use SetTopAndLimit as bottleneck instead of allocation_info_.Reset.
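
For illustration, a minimal sketch of how an allocating space could drive the
new interface once it is wired up (hypothetical caller; the IsActive() gate and
the fast/slow split are inferred from the DCHECKs in the new methods):

  // Hypothetical allocation path (not part of this change): report bytes in
  // bulk while they fit under the next observer step, otherwise hand the
  // object itself to the observers.
  if (allocation_counter_.IsActive()) {
    if (object_size <= allocation_counter_.NextBytes()) {
      allocation_counter_.NotifyBytes(object_size);
    } else {
      allocation_counter_.NotifyObject(soon_object, object_size);
    }
  }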

Bug: v8:10315
Change-Id: I30049cb02e873bb08ebce606a491d99130421227
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2316103
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69060}
parent 8b76b879
@@ -21,6 +21,50 @@ void AllocationCounter::RemoveAllocationObserver(AllocationObserver* observer) {
allocation_observers_.erase(it);
}
intptr_t AllocationCounter::GetNextInlineAllocationStepSize() {
intptr_t next_step = 0;
for (AllocationObserver* observer : allocation_observers_) {
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
DCHECK(!HasAllocationObservers() || next_step > 0);
return next_step;
}
void AllocationCounter::NotifyBytes(size_t allocated) {
if (!IsActive()) {
return;
}
DCHECK_LE(allocated, next_counter_ - current_counter_);
current_counter_ += allocated;
}
void AllocationCounter::NotifyObject(Address soon_object, size_t object_size) {
if (!IsActive()) {
return;
}
DCHECK_GT(object_size, next_counter_ - current_counter_);
size_t bytes_since_last_step = current_counter_ - prev_counter_;
DCHECK(!heap_->allocation_step_in_progress());
heap_->set_allocation_step_in_progress(true);
DCHECK(soon_object);
heap_->CreateFillerObjectAt(soon_object, static_cast<int>(object_size),
ClearRecordedSlots::kNo);
intptr_t next_step = 0;
for (AllocationObserver* observer : allocation_observers_) {
observer->AllocationStep(static_cast<int>(bytes_since_last_step),
soon_object, object_size);
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
heap_->set_allocation_step_in_progress(false);
prev_counter_ = current_counter_;
next_counter_ = current_counter_ + object_size + next_step;
}
void AllocationObserver::AllocationStep(int bytes_allocated,
Address soon_object, size_t size) {
DCHECK_GE(bytes_allocated, 0);
@@ -33,16 +77,6 @@ void AllocationObserver::AllocationStep(int bytes_allocated,
DCHECK_GE(bytes_to_next_step_, 0);
}
intptr_t AllocationCounter::GetNextInlineAllocationStepSize() {
intptr_t next_step = 0;
for (AllocationObserver* observer : allocation_observers_) {
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
DCHECK(!HasAllocationObservers() || next_step > 0);
return next_step;
}
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
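For context, a minimal sketch of an observer driven by these callbacks
(illustrative only; it assumes the usual AllocationObserver constructor taking
a step size and the virtual Step hook referenced in the commit message):

  class SampleObserver : public AllocationObserver {
   public:
    explicit SampleObserver(intptr_t step_size)
        : AllocationObserver(step_size) {}

    // Invoked via AllocationStep roughly every step_size allocated bytes;
    // soon_object is the address NotifyObject passed in (covered by a filler
    // until the real object is written).
    void Step(int bytes_allocated, Address soon_object, size_t size) override {
      // e.g. sample soon_object for heap profiling.
    }
  };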
@@ -13,10 +13,16 @@ namespace v8 {
namespace internal {
class AllocationObserver;
class Heap;
class AllocationCounter {
public:
AllocationCounter() : paused_(false) {}
explicit AllocationCounter(Heap* heap)
: heap_(heap),
paused_(false),
prev_counter_(0),
current_counter_(0),
next_counter_(0) {}
auto begin() { return allocation_observers_.begin(); }
auto end() { return allocation_observers_.end(); }
@@ -41,11 +47,24 @@ class AllocationCounter {
intptr_t GetNextInlineAllocationStepSize();
void NotifyBytes(size_t allocated);
void NotifyObject(Address soon_object, size_t object_size);
size_t NextBytes() {
DCHECK(IsActive());
return next_counter_ - current_counter_;
}
private:
bool IsPaused() { return paused_; }
std::vector<AllocationObserver*> allocation_observers_;
Heap* heap_;
bool paused_;
size_t prev_counter_;
size_t current_counter_;
size_t next_counter_;
};
// -----------------------------------------------------------------------------
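For intuition, a worked example of the three counters (assumed starting
values; counter and addr are illustrative names, and how next_counter_ is
first armed when an observer is added is outside this diff):

  // Assume prev_counter_ = current_counter_ = 0 and next_counter_ = 512,
  // i.e. one observer with a 512-byte step.
  counter.NotifyBytes(100);         // current_counter_ = 100, NextBytes() = 412
  counter.NotifyObject(addr, 600);  // 600 > 412, so observers get a step with
                                    // bytes_since_last_step = 100; afterwards
                                    // prev_counter_ = 100 and
                                    // next_counter_ = 100 + 600 + new min step.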
@@ -305,6 +305,13 @@ void PagedSpace::RemovePage(Page* page) {
}
}
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(top, limit);
}
size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
@@ -473,7 +480,7 @@ void PagedSpace::ReleasePage(Page* page) {
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
allocation_info_.Reset(kNullAddress, kNullAddress);
SetTopAndLimit(kNullAddress, kNullAddress);
}
heap()->isolate()->RemoveCodeMemoryChunk(page);
@@ -314,12 +314,7 @@ class V8_EXPORT_PRIVATE PagedSpace
private:
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(top, limit);
}
void SetTopAndLimit(Address top, Address limit);
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsInlineAllocation() override {
@@ -112,6 +112,7 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
public:
Space(Heap* heap, AllocationSpace id, FreeList* free_list)
: BaseSpace(heap, id),
allocation_counter_(heap),
free_list_(std::unique_ptr<FreeList>(free_list)) {
external_backing_store_bytes_ =
new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];