Commit ef603a9e authored by Dominik Inführ, committed by Commit Bot

Revert "[heap] Refactor allocation observer in AllocationCounter"

This reverts commit b354e344.

Reason for revert: Clusterfuzz found issues with this CL.

Original change's description:
> [heap] Refactor allocation observer in AllocationCounter
> 
> Moves accounting of allocation observers into the AllocationCounter
> class. This CL removes top_on_previous_step_ for counters that are
> increased regularly in the slow path of the allocation functions.
> 
> AdvanceAllocationObservers() informs the AllocationCounter about
> allocated bytes, InvokeAllocationObservers() needs to be invoked when
> an allocation step is reached. NextBytes() returns the number of bytes
> until the next AllocationObserver::Step needs to run.
> 
> Bug: v8:10315
> Change-Id: I8b6eb8719ab032d44ee0614d2a0f2645bfce9df6
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2320650
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69170}
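
For context, a minimal sketch of how the reverted API described above is driven from an allocation slow path, mirroring LargeObjectSpace::AdvanceAndInvokeAllocationObservers in the diff below (the free-standing wrapper is illustrative only, not part of the CL):

    // Sketch: accounting a single large-object allocation with the reverted API.
    void AccountLargeAllocation(AllocationCounter* counter, Address soon_object,
                                size_t object_size) {
      if (!counter->IsActive()) return;
      // Run AllocationObserver::Step for observers whose next step falls inside
      // this allocation; the object is its own aligned allocation here.
      if (object_size >= counter->NextBytes()) {
        counter->InvokeAllocationObservers(soon_object, object_size, object_size);
      }
      // Large objects are accounted immediately since no LAB is involved.
      counter->AdvanceAllocationObservers(object_size);
    }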

TBR=ulan@chromium.org,dinfuehr@chromium.org

Change-Id: Icd713207bfb2085421fd82009be24a0211ae86da
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:10315
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2332667
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69187}
parent f220997e
@@ -11,97 +11,70 @@ namespace v8 {
 namespace internal {
 
 void AllocationCounter::AddAllocationObserver(AllocationObserver* observer) {
-  intptr_t step_size = observer->GetNextStepSize();
-  size_t observer_next_counter = current_counter_ + step_size;
-#if DEBUG
-  auto it =
-      std::find_if(observers_.begin(), observers_.end(),
-                   [observer](const ObserverAccounting& observer_accounting) {
-                     return observer_accounting.observer_ == observer;
-                   });
-  DCHECK_EQ(observers_.end(), it);
-#endif
-
-  observers_.push_back(
-      ObserverAccounting(observer, current_counter_, observer_next_counter));
-
-  if (observers_.size() == 1) {
-    DCHECK_EQ(current_counter_, next_counter_);
-    next_counter_ = observer_next_counter;
-  } else {
-    size_t missing_bytes = next_counter_ - current_counter_;
-    next_counter_ =
-        current_counter_ + Min(static_cast<intptr_t>(missing_bytes), step_size);
-  }
+  allocation_observers_.push_back(observer);
 }
 
 void AllocationCounter::RemoveAllocationObserver(AllocationObserver* observer) {
-  auto it =
-      std::find_if(observers_.begin(), observers_.end(),
-                   [observer](const ObserverAccounting& observer_accounting) {
-                     return observer_accounting.observer_ == observer;
-                   });
-  DCHECK_NE(observers_.end(), it);
-  observers_.erase(it);
-
-  if (observers_.size() == 0) {
-    current_counter_ = next_counter_ = 0;
-  } else {
-    size_t step_size = 0;
-
-    for (ObserverAccounting& observer : observers_) {
-      size_t left_in_step = observer.next_counter_ - current_counter_;
-      DCHECK_GT(left_in_step, 0);
-      step_size = step_size ? Min(step_size, left_in_step) : left_in_step;
-    }
-
-    next_counter_ = current_counter_ + step_size;
-  }
+  auto it = std::find(allocation_observers_.begin(),
+                      allocation_observers_.end(), observer);
+  DCHECK(allocation_observers_.end() != it);
+  allocation_observers_.erase(it);
+}
+
+intptr_t AllocationCounter::GetNextInlineAllocationStepSize() {
+  intptr_t next_step = 0;
+  for (AllocationObserver* observer : allocation_observers_) {
+    next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
+                          : observer->bytes_to_next_step();
+  }
+  DCHECK(!HasAllocationObservers() || next_step > 0);
+  return next_step;
 }
 
-void AllocationCounter::AdvanceAllocationObservers(size_t allocated) {
+void AllocationCounter::NotifyBytes(size_t allocated) {
   if (!IsActive()) {
     return;
   }
 
-  DCHECK_LT(allocated, next_counter_ - current_counter_);
+  DCHECK_LE(allocated, next_counter_ - current_counter_);
   current_counter_ += allocated;
 }
 
-void AllocationCounter::InvokeAllocationObservers(Address soon_object,
-                                                  size_t object_size,
-                                                  size_t aligned_object_size) {
+void AllocationCounter::NotifyObject(Address soon_object, size_t object_size) {
   if (!IsActive()) {
     return;
   }
 
-  DCHECK_GE(aligned_object_size, next_counter_ - current_counter_);
+  DCHECK_GT(object_size, next_counter_ - current_counter_);
+  size_t bytes_since_last_step = current_counter_ - prev_counter_;
+  DCHECK(!heap_->allocation_step_in_progress());
+  heap_->set_allocation_step_in_progress(true);
   DCHECK(soon_object);
-
-  size_t step_size = 0;
-  bool step_run = false;
-
-  for (ObserverAccounting& observer_accounting : observers_) {
-    if (observer_accounting.next_counter_ - current_counter_ <=
-        aligned_object_size) {
-      observer_accounting.observer_->Step(
-          static_cast<int>(current_counter_ -
-                           observer_accounting.prev_counter_),
-          soon_object, object_size);
-      size_t observer_step_size =
-          observer_accounting.observer_->GetNextStepSize();
-
-      observer_accounting.prev_counter_ = current_counter_;
-      observer_accounting.next_counter_ =
-          current_counter_ + aligned_object_size + observer_step_size;
-      step_run = true;
-    }
-
-    size_t left_in_step = observer_accounting.next_counter_ - current_counter_;
-    step_size = step_size ? Min(step_size, left_in_step) : left_in_step;
+  heap_->CreateFillerObjectAt(soon_object, static_cast<int>(object_size),
+                              ClearRecordedSlots::kNo);
+  intptr_t next_step = 0;
+  for (AllocationObserver* observer : allocation_observers_) {
+    observer->AllocationStep(static_cast<int>(bytes_since_last_step),
+                             soon_object, object_size);
+    next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
+                          : observer->bytes_to_next_step();
   }
+  heap_->set_allocation_step_in_progress(false);
 
-  CHECK(step_run);
-  next_counter_ = current_counter_ + step_size;
+  prev_counter_ = current_counter_;
+  next_counter_ = current_counter_ + object_size + next_step;
+}
+
+void AllocationObserver::AllocationStep(int bytes_allocated,
+                                        Address soon_object, size_t size) {
+  DCHECK_GE(bytes_allocated, 0);
+  bytes_to_next_step_ -= bytes_allocated;
+  if (bytes_to_next_step_ <= 0) {
+    Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
+    step_size_ = GetNextStepSize();
+    bytes_to_next_step_ = step_size_;
+  }
+  DCHECK_GE(bytes_to_next_step_, 0);
 }
 
 PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
...
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
#ifndef V8_HEAP_ALLOCATION_OBSERVER_H_ #ifndef V8_HEAP_ALLOCATION_OBSERVER_H_
#define V8_HEAP_ALLOCATION_OBSERVER_H_ #define V8_HEAP_ALLOCATION_OBSERVER_H_
#include <cstdint>
#include <vector> #include <vector>
#include "src/common/globals.h" #include "src/common/globals.h"
...@@ -14,14 +13,27 @@ namespace v8 { ...@@ -14,14 +13,27 @@ namespace v8 {
namespace internal { namespace internal {
class AllocationObserver; class AllocationObserver;
class Heap;
class AllocationCounter { class AllocationCounter {
public: public:
AllocationCounter() : paused_(false), current_counter_(0), next_counter_(0) {} explicit AllocationCounter(Heap* heap)
V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver* observer); : heap_(heap),
V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver* observer); paused_(false),
prev_counter_(0),
current_counter_(0),
next_counter_(0) {}
bool IsActive() { return !IsPaused() && observers_.size() > 0; } auto begin() { return allocation_observers_.begin(); }
auto end() { return allocation_observers_.end(); }
void AddAllocationObserver(AllocationObserver* observer);
void RemoveAllocationObserver(AllocationObserver* observer);
bool HasAllocationObservers() { return !allocation_observers_.empty(); }
size_t NumberAllocationObservers() { return allocation_observers_.size(); }
bool IsActive() { return !IsPaused() && HasAllocationObservers(); }
void Pause() { void Pause() {
DCHECK(!paused_); DCHECK(!paused_);
...@@ -33,10 +45,10 @@ class AllocationCounter { ...@@ -33,10 +45,10 @@ class AllocationCounter {
paused_ = false; paused_ = false;
} }
V8_EXPORT_PRIVATE void AdvanceAllocationObservers(size_t allocated); intptr_t GetNextInlineAllocationStepSize();
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t object_size, void NotifyBytes(size_t allocated);
size_t aligned_object_size); void NotifyObject(Address soon_object, size_t object_size);
size_t NextBytes() { size_t NextBytes() {
DCHECK(IsActive()); DCHECK(IsActive());
...@@ -46,22 +58,11 @@ class AllocationCounter { ...@@ -46,22 +58,11 @@ class AllocationCounter {
private: private:
bool IsPaused() { return paused_; } bool IsPaused() { return paused_; }
struct ObserverAccounting { std::vector<AllocationObserver*> allocation_observers_;
ObserverAccounting(AllocationObserver* observer, size_t prev_counter, Heap* heap_;
size_t next_counter)
: observer_(observer),
prev_counter_(prev_counter),
next_counter_(next_counter) {}
AllocationObserver* observer_;
size_t prev_counter_;
size_t next_counter_;
};
std::vector<ObserverAccounting> observers_;
bool paused_; bool paused_;
size_t prev_counter_;
size_t current_counter_; size_t current_counter_;
size_t next_counter_; size_t next_counter_;
}; };
...@@ -70,12 +71,21 @@ class AllocationCounter { ...@@ -70,12 +71,21 @@ class AllocationCounter {
// Allows observation of allocations. // Allows observation of allocations.
class AllocationObserver { class AllocationObserver {
public: public:
explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) { explicit AllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
DCHECK_LE(kTaggedSize, step_size); DCHECK_LE(kTaggedSize, step_size);
} }
virtual ~AllocationObserver() = default; virtual ~AllocationObserver() = default;
// Called each time the observed space does an allocation step. This may be
// more frequently than the step_size we are monitoring (e.g. when there are
// multiple observers, or when page or space boundary is encountered.)
void AllocationStep(int bytes_allocated, Address soon_object, size_t size);
protected: protected:
intptr_t step_size() const { return step_size_; }
intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
// Pure virtual method provided by the subclasses that gets called when at // Pure virtual method provided by the subclasses that gets called when at
// least step_size bytes have been allocated. soon_object is the address just // least step_size bytes have been allocated. soon_object is the address just
// allocated (but not yet initialized.) size is the size of the object as // allocated (but not yet initialized.) size is the size of the object as
...@@ -93,9 +103,10 @@ class AllocationObserver { ...@@ -93,9 +103,10 @@ class AllocationObserver {
// Subclasses can override this method to make step size dynamic. // Subclasses can override this method to make step size dynamic.
virtual intptr_t GetNextStepSize() { return step_size_; } virtual intptr_t GetNextStepSize() { return step_size_; }
private:
intptr_t step_size_; intptr_t step_size_;
intptr_t bytes_to_next_step_;
private:
friend class AllocationCounter; friend class AllocationCounter;
DISALLOW_COPY_AND_ASSIGN(AllocationObserver); DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
}; };
......
@@ -66,11 +66,6 @@ HeapObject AllocationResult::ToObject() {
   return HeapObject::cast(object_);
 }
 
-Address AllocationResult::ToAddress() {
-  DCHECK(!IsRetry());
-  return HeapObject::cast(object_).address();
-}
-
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(
       reinterpret_cast<intptr_t>(this) -
...
@@ -900,7 +900,6 @@ void Heap::MergeAllocationSitePretenuringFeedback(
 void Heap::AddAllocationObserversToAllSpaces(
     AllocationObserver* observer, AllocationObserver* new_space_observer) {
   DCHECK(observer && new_space_observer);
-  SafepointScope scope(this);
 
   for (SpaceIterator it(this); it.HasNext();) {
     Space* space = it.Next();
@@ -915,7 +914,6 @@ void Heap::AddAllocationObserversToAllSpaces(
 void Heap::RemoveAllocationObserversFromAllSpaces(
     AllocationObserver* observer, AllocationObserver* new_space_observer) {
   DCHECK(observer && new_space_observer);
-  SafepointScope scope(this);
 
   for (SpaceIterator it(this); it.HasNext();) {
     Space* space = it.Next();
@@ -5129,7 +5127,6 @@ void Heap::EnableInlineAllocation() {
   inline_allocation_disabled_ = false;
 
   // Update inline allocation limit for new space.
-  new_space()->AdvanceAllocationObservers();
   new_space()->UpdateInlineAllocationLimit(0);
 }
...
@@ -197,7 +197,6 @@ class AllocationResult {
   inline bool IsRetry() { return object_.IsSmi(); }
   inline HeapObject ToObjectChecked();
   inline HeapObject ToObject();
-  inline Address ToAddress();
   inline AllocationSpace RetrySpace();
 
   template <typename T>
@@ -1396,6 +1395,11 @@ class Heap {
   void RemoveAllocationObserversFromAllSpaces(
       AllocationObserver* observer, AllocationObserver* new_space_observer);
 
+  bool allocation_step_in_progress() { return allocation_step_in_progress_; }
+  void set_allocation_step_in_progress(bool val) {
+    allocation_step_in_progress_ = val;
+  }
+
   // ===========================================================================
   // Heap object allocation tracking. ==========================================
   // ===========================================================================
@@ -2075,6 +2079,8 @@ class Heap {
   // Observer that can cause early scavenge start.
   StressScavengeObserver* stress_scavenge_observer_ = nullptr;
 
+  bool allocation_step_in_progress_ = false;
+
   // The maximum percent of the marking limit reached wihout causing marking.
   // This is tracked when specyfing --fuzzer-gc-analysis.
   double max_marking_limit_reached_ = 0.0;
...
@@ -108,19 +108,6 @@ void LargeObjectSpace::TearDown() {
   }
 }
 
-void LargeObjectSpace::AdvanceAndInvokeAllocationObservers(Address soon_object,
-                                                           size_t object_size) {
-  if (!allocation_counter_.IsActive()) return;
-
-  if (object_size >= allocation_counter_.NextBytes()) {
-    allocation_counter_.InvokeAllocationObservers(soon_object, object_size,
-                                                  object_size);
-  }
-
-  // Large objects can be accounted immediately since no LAB is involved.
-  allocation_counter_.AdvanceAllocationObservers(object_size);
-}
-
 AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
   return AllocateRaw(object_size, NOT_EXECUTABLE);
 }
@@ -149,8 +136,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
           heap()->incremental_marking()->marking_state()->IsBlack(object));
   page->InitializationMemoryFence();
   heap()->NotifyOldGenerationExpansion(identity(), page);
-  AdvanceAndInvokeAllocationObservers(object.address(),
-                                      static_cast<size_t>(object_size));
+  AllocationStep(object_size, object.address(), object_size);
   return object;
 }
 
@@ -472,8 +458,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   page->InitializationMemoryFence();
   DCHECK(page->IsLargePage());
   DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
-  AdvanceAndInvokeAllocationObservers(result.address(),
-                                      static_cast<size_t>(object_size));
+  AllocationStep(object_size, result.address(), object_size);
   return result;
 }
...
@@ -113,8 +113,6 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
  protected:
   LargeObjectSpace(Heap* heap, AllocationSpace id);
 
-  void AdvanceAndInvokeAllocationObservers(Address soon_object, size_t size);
-
   LargePage* AllocateLargePage(int object_size, Executability executable);
 
   size_t size_;  // allocated bytes
...
@@ -90,7 +90,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
   AllocationResult result;
 
   if (alignment != kWordAligned) {
-    result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
+    result = AllocateFastAligned(size_in_bytes, alignment, origin);
   } else {
     result = AllocateFastUnaligned(size_in_bytes, origin);
   }
@@ -122,9 +122,9 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
   return obj;
 }
 
-AllocationResult NewSpace::AllocateFastAligned(
-    int size_in_bytes, int* result_aligned_size_in_bytes,
-    AllocationAlignment alignment, AllocationOrigin origin) {
+AllocationResult NewSpace::AllocateFastAligned(int size_in_bytes,
+                                               AllocationAlignment alignment,
+                                               AllocationOrigin origin) {
   Address top = allocation_info_.top();
   int filler_size = Heap::GetFillToAlign(top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -136,8 +136,6 @@ AllocationResult NewSpace::AllocateFastAligned(
   HeapObject obj = HeapObject::FromAddress(top);
   allocation_info_.set_top(top + aligned_size_in_bytes);
-  if (result_aligned_size_in_bytes)
-    *result_aligned_size_in_bytes = aligned_size_in_bytes;
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
   if (filler_size > 0) {
...
@@ -465,7 +465,8 @@ bool NewSpace::Rebalance() {
 }
 
 void NewSpace::UpdateLinearAllocationArea() {
-  AdvanceAllocationObservers();
+  // Make sure there is no unaccounted allocations.
+  DCHECK(!allocation_counter_.IsActive() || top_on_previous_step_ == top());
 
   Address new_top = to_space_.page_low();
   BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -474,10 +475,13 @@ void NewSpace::UpdateLinearAllocationArea() {
   // See the corresponding loads in ConcurrentMarking::Run.
   original_limit_.store(limit(), std::memory_order_relaxed);
   original_top_.store(top(), std::memory_order_release);
+  StartNextInlineAllocationStep();
 
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 void NewSpace::ResetLinearAllocationArea() {
+  // Do a step to account for memory allocated so far before resetting.
+  InlineAllocationStep(top(), top(), kNullAddress, 0);
   to_space_.Reset();
   UpdateLinearAllocationArea();
   // Clear all mark-bits in the to-space.
@@ -502,6 +506,9 @@ bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
   DCHECK(!OldSpace::IsAtPageStart(top));
 
+  // Do a step to account for memory allocated on previous page.
+  InlineAllocationStep(top, top, kNullAddress, 0);
+
   if (!to_space_.AdvancePage()) {
     // No more pages left to advance.
     return false;
@@ -523,30 +530,34 @@ bool NewSpace::AddFreshPageSynchronized() {
 
 bool NewSpace::EnsureAllocation(int size_in_bytes,
                                 AllocationAlignment alignment) {
-  AdvanceAllocationObservers();
-
   Address old_top = allocation_info_.top();
   Address high = to_space_.page_high();
   int filler_size = Heap::GetFillToAlign(old_top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;
 
-  if (old_top + aligned_size_in_bytes <= high) {
-    UpdateInlineAllocationLimit(aligned_size_in_bytes);
-    return true;
-  }
-
-  // Not enough room in the page, try to allocate a new one.
-  if (!AddFreshPage()) {
-    return false;
-  }
-
-  old_top = allocation_info_.top();
-  high = to_space_.page_high();
-  filler_size = Heap::GetFillToAlign(old_top, alignment);
-  aligned_size_in_bytes = size_in_bytes + filler_size;
+  if (old_top + aligned_size_in_bytes > high) {
+    // Not enough room in the page, try to allocate a new one.
+    if (!AddFreshPage()) {
+      return false;
+    }
+
+    old_top = allocation_info_.top();
+    high = to_space_.page_high();
+    filler_size = Heap::GetFillToAlign(old_top, alignment);
+  }
 
   DCHECK(old_top + aligned_size_in_bytes <= high);
-  UpdateInlineAllocationLimit(aligned_size_in_bytes);
+
+  if (allocation_info_.limit() < high) {
+    // Either the limit has been lowered because linear allocation was disabled
+    // or because incremental marking wants to get a chance to do a step,
+    // or because idle scavenge job wants to get a chance to post a task.
+    // Set the new limit accordingly.
+    Address new_top = old_top + aligned_size_in_bytes;
+    Address soon_object = old_top + filler_size;
+    InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
+    UpdateInlineAllocationLimit(aligned_size_in_bytes);
+  }
   return true;
 }
 
@@ -557,6 +568,12 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
 AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
                                            AllocationAlignment alignment,
                                            AllocationOrigin origin) {
+  if (top() < top_on_previous_step_) {
+    // Generated code decreased the top() pointer to do folded allocations
+    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+              Page::FromAllocationAreaAddress(top_on_previous_step_));
+    top_on_previous_step_ = top();
+  }
 #ifdef V8_HOST_ARCH_32_BIT
   return alignment != kWordAligned
              ? AllocateRawAligned(size_in_bytes, alignment, origin)
@@ -578,14 +595,8 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
     return AllocationResult::Retry();
   }
 
-  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
-
   AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
   DCHECK(!result.IsRetry());
-
-  InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
-                            size_in_bytes);
-
   return result;
 }
 
@@ -596,17 +607,9 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
     return AllocationResult::Retry();
   }
 
-  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
-
-  int aligned_size_in_bytes;
-
-  AllocationResult result = AllocateFastAligned(
-      size_in_bytes, &aligned_size_in_bytes, alignment, origin);
+  AllocationResult result =
+      AllocateFastAligned(size_in_bytes, alignment, origin);
   DCHECK(!result.IsRetry());
-
-  InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
-                            aligned_size_in_bytes, aligned_size_in_bytes);
-
   return result;
 }
...
@@ -477,8 +477,8 @@ class V8_EXPORT_PRIVATE NewSpace
   // Internal allocation methods.
   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
-                      AllocationAlignment alignment, AllocationOrigin origin);
+  AllocateFastAligned(int size_in_bytes, AllocationAlignment alignment,
+                      AllocationOrigin origin);
 
   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
   AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
@@ -495,7 +495,7 @@ class V8_EXPORT_PRIVATE NewSpace
       int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
-  bool SupportsAllocationObserver() override { return true; }
+  bool SupportsInlineAllocation() override { return true; }
 
   friend class SemiSpaceObjectIterator;
 };
...
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
#define V8_HEAP_PAGED_SPACES_INL_H_ #define V8_HEAP_PAGED_SPACES_INL_H_
#include "src/common/globals.h" #include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h" #include "src/heap/incremental-marking.h"
#include "src/heap/paged-spaces.h" #include "src/heap/paged-spaces.h"
#include "src/objects/code-inl.h" #include "src/objects/code-inl.h"
...@@ -97,20 +96,18 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) { ...@@ -97,20 +96,18 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
return RefillLabMain(size_in_bytes, origin); return RefillLabMain(size_in_bytes, origin);
} }
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) { AllocationResult PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top(); Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes; Address new_top = current_top + size_in_bytes;
if (new_top > allocation_info_.limit()) if (new_top > allocation_info_.limit())
return AllocationResult::Retry(identity()); return AllocationResult::Retry(identity());
DCHECK_LE(new_top, allocation_info_.limit()); DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top); allocation_info_.set_top(new_top);
return AllocationResult(HeapObject::FromAddress(current_top)); return AllocationResult(HeapObject::FromAddress(current_top));
} }
AllocationResult PagedSpace::AllocateFastAligned( AllocationResult PagedSpace::TryAllocateLinearlyAligned(
int size_in_bytes, int* aligned_size_in_bytes, int size_in_bytes, AllocationAlignment alignment) {
AllocationAlignment alignment) {
Address current_top = allocation_info_.top(); Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment); int filler_size = Heap::GetFillToAlign(current_top, alignment);
...@@ -119,8 +116,6 @@ AllocationResult PagedSpace::AllocateFastAligned( ...@@ -119,8 +116,6 @@ AllocationResult PagedSpace::AllocateFastAligned(
return AllocationResult::Retry(identity()); return AllocationResult::Retry(identity());
allocation_info_.set_top(new_top); allocation_info_.set_top(new_top);
if (aligned_size_in_bytes)
*aligned_size_in_bytes = filler_size + size_in_bytes;
if (filler_size > 0) { if (filler_size > 0) {
Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top), filler_size); HeapObject::FromAddress(current_top), filler_size);
...@@ -134,8 +129,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes, ...@@ -134,8 +129,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
if (!EnsureLabMain(size_in_bytes, origin)) { if (!EnsureLabMain(size_in_bytes, origin)) {
return AllocationResult::Retry(identity()); return AllocationResult::Retry(identity());
} }
AllocationResult result = AllocateLinearly(size_in_bytes);
AllocationResult result = AllocateFastUnaligned(size_in_bytes);
DCHECK(!result.IsRetry()); DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(), MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes); size_in_bytes);
...@@ -144,9 +138,6 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes, ...@@ -144,9 +138,6 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
UpdateAllocationOrigins(origin); UpdateAllocationOrigins(origin);
} }
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result; return result;
} }
...@@ -162,9 +153,8 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes, ...@@ -162,9 +153,8 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
if (!EnsureLabMain(allocation_size, origin)) { if (!EnsureLabMain(allocation_size, origin)) {
return AllocationResult::Retry(identity()); return AllocationResult::Retry(identity());
} }
int aligned_size_in_bytes;
AllocationResult result = AllocationResult result =
AllocateFastAligned(size_in_bytes, &aligned_size_in_bytes, alignment); TryAllocateLinearlyAligned(size_in_bytes, alignment);
DCHECK(!result.IsRetry()); DCHECK(!result.IsRetry());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(), MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes); size_in_bytes);
...@@ -173,9 +163,6 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes, ...@@ -173,9 +163,6 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
UpdateAllocationOrigins(origin); UpdateAllocationOrigins(origin);
} }
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, allocation_size);
return result; return result;
} }
...@@ -185,9 +172,9 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes, ...@@ -185,9 +172,9 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult result; AllocationResult result;
if (alignment != kWordAligned) { if (alignment != kWordAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment); result = TryAllocateLinearlyAligned(size_in_bytes, alignment);
} else { } else {
result = AllocateFastUnaligned(size_in_bytes); result = AllocateLinearly(size_in_bytes);
} }
if (!result.IsRetry()) { if (!result.IsRetry()) {
......
@@ -383,13 +383,6 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
   DCHECK_LE(top(), new_limit);
   DCHECK_GE(old_limit, new_limit);
   if (new_limit != old_limit) {
-    base::Optional<CodePageMemoryModificationScope> optional_scope;
-
-    if (identity() == CODE_SPACE) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(new_limit);
-      optional_scope.emplace(chunk);
-    }
-
     SetTopAndLimit(top(), new_limit);
     Free(new_limit, old_limit - new_limit,
          SpaceAccountingMode::kSpaceAccounted);
@@ -446,7 +439,12 @@ void PagedSpace::FreeLinearAllocationArea() {
     return;
   }
 
-  AdvanceAllocationObservers();
+  if (!is_local_space()) {
+    // This can start incremental marking and mark the current
+    // linear allocation area as black. Thus destroying of the black
+    // area needs to happen afterwards.
+    InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
+  }
 
   if (current_top != current_limit && !is_off_thread_space() &&
       heap()->incremental_marking()->black_allocation()) {
@@ -481,6 +479,7 @@ void PagedSpace::ReleasePage(Page* page) {
   free_list_->EvictFreeListItems(page);
 
   if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+    DCHECK(!top_on_previous_step_);
     SetTopAndLimit(kNullAddress, kNullAddress);
   }
 
@@ -553,7 +552,6 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
   Page* page = Page::FromHeapObject(new_node);
   IncreaseAllocatedBytes(new_node_size, page);
 
-  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
   Address start = new_node.address();
   Address end = new_node.address() + new_node_size;
   Address limit = ComputeLimit(start, end, size_in_bytes);
@@ -850,9 +848,6 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
 #endif
 
 void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
-  // Ensure there are no unaccounted allocations.
-  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
-
   Address new_limit = ComputeLimit(top(), limit(), min_size);
   DCHECK_LE(top(), new_limit);
   DCHECK_LE(new_limit, limit());
@@ -1004,6 +999,20 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
 AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
                                              AllocationAlignment alignment,
                                              AllocationOrigin origin) {
+  if (top_on_previous_step_ && top() < top_on_previous_step_ &&
+      SupportsInlineAllocation()) {
+    // Generated code decreased the top() pointer to do folded allocations.
+    // The top_on_previous_step_ can be one byte beyond the current page.
+    DCHECK_NE(top(), kNullAddress);
+    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+              Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
+    top_on_previous_step_ = top();
+  }
+  size_t bytes_since_last =
+      top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
+  DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
+
   if (!is_local_space()) {
     // Start incremental marking before the actual allocation, this allows the
     // allocation function to mark the object black when incremental marking is
@@ -1021,6 +1030,15 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
 #else
   AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
 #endif
+  HeapObject heap_obj;
+  if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
+    AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+                   heap_obj.address(), size_in_bytes);
+    StartNextInlineAllocationStep();
+    DCHECK_IMPLIES(
+        heap()->incremental_marking()->black_allocation(),
+        heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+  }
   return result;
 }
...
@@ -317,7 +317,9 @@ class V8_EXPORT_PRIVATE PagedSpace
   void SetTopAndLimit(Address top, Address limit);
   void DecreaseLimit(Address new_limit);
   void UpdateInlineAllocationLimit(size_t min_size) override;
-  bool SupportsAllocationObserver() override { return !is_local_space(); }
+  bool SupportsInlineAllocation() override {
+    return identity() == OLD_SPACE && !is_local_space();
+  }
 
   // Slow path of allocation function
   V8_WARN_UNUSED_RESULT AllocationResult
@@ -348,14 +350,13 @@ class V8_EXPORT_PRIVATE PagedSpace
   inline bool EnsureLabMain(int size_in_bytes, AllocationOrigin origin);
   // Allocates an object from the linear allocation area. Assumes that the
   // linear allocation area is large enought to fit the object.
-  inline AllocationResult AllocateFastUnaligned(int size_in_bytes);
+  inline AllocationResult AllocateLinearly(int size_in_bytes);
   // Tries to allocate an aligned object from the linear allocation area.
   // Returns nullptr if the linear allocation area does not fit the object.
   // Otherwise, returns the object pointer and writes the allocation size
   // (object size + alignment filler size) to the size_in_bytes.
-  inline AllocationResult AllocateFastAligned(int size_in_bytes,
-                                              int* aligned_size_in_bytes,
-                                              AllocationAlignment alignment);
+  inline AllocationResult TryAllocateLinearlyAligned(
+      int size_in_bytes, AllocationAlignment alignment);
 
   V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
       size_t size_in_bytes, AllocationOrigin origin);
...
@@ -249,16 +249,46 @@ void Page::DestroyBlackAreaBackground(Address start, Address end) {
 void Space::AddAllocationObserver(AllocationObserver* observer) {
   allocation_counter_.AddAllocationObserver(observer);
+  StartNextInlineAllocationStep();
 }
 
 void Space::RemoveAllocationObserver(AllocationObserver* observer) {
   allocation_counter_.RemoveAllocationObserver(observer);
+  StartNextInlineAllocationStep();
 }
 
 void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
 
 void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
 
+void Space::AllocationStep(int bytes_since_last, Address soon_object,
+                           int size) {
+  if (!allocation_counter_.IsActive()) {
+    return;
+  }
+
+  DCHECK(!heap()->allocation_step_in_progress());
+  heap()->set_allocation_step_in_progress(true);
+  heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
+  for (AllocationObserver* observer : allocation_counter_) {
+    observer->AllocationStep(bytes_since_last, soon_object, size);
+  }
+  heap()->set_allocation_step_in_progress(false);
+}
+
+void Space::AllocationStepAfterMerge(Address first_object_in_chunk, int size) {
+  if (!allocation_counter_.IsActive()) {
+    return;
+  }
+
+  DCHECK(!heap()->allocation_step_in_progress());
+  heap()->set_allocation_step_in_progress(true);
+  for (AllocationObserver* observer : allocation_counter_) {
+    observer->AllocationStep(size, first_object_in_chunk, size);
+  }
+  heap()->set_allocation_step_in_progress(false);
+}
+
 Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
                                           size_t min_size) {
   DCHECK_GE(end - start, min_size);
@@ -266,19 +296,14 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
   if (heap()->inline_allocation_disabled()) {
     // Fit the requested area exactly.
     return start + min_size;
-  } else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
-    // Ensure there are no unaccounted allocations.
-    DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
-
+  } else if (SupportsInlineAllocation() && allocation_counter_.IsActive()) {
     // Generated code may allocate inline from the linear allocation area for.
     // To make sure we can observe these allocations, we use a lower limit.
-    size_t step = allocation_counter_.NextBytes();
-    DCHECK_NE(step, 0);
+    size_t step = allocation_counter_.GetNextInlineAllocationStepSize();
     size_t rounded_step =
         RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
     // Use uint64_t to avoid overflow on 32-bit
-    uint64_t step_end =
-        static_cast<uint64_t>(start) + Max(min_size, rounded_step);
+    uint64_t step_end = static_cast<uint64_t>(start) + min_size + rounded_step;
     uint64_t new_end = Min(step_end, static_cast<uint64_t>(end));
     return static_cast<Address>(new_end);
   } else {
@@ -345,75 +370,74 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
   other.allocation_info_.Reset(kNullAddress, kNullAddress);
   return *this;
 }
 
+void SpaceWithLinearArea::StartNextInlineAllocationStep() {
+  if (heap()->allocation_step_in_progress()) {
+    // If we are mid-way through an existing step, don't start a new one.
+    return;
+  }
+
+  if (allocation_counter_.IsActive()) {
+    top_on_previous_step_ = top();
+    UpdateInlineAllocationLimit(0);
+  } else {
+    DCHECK_EQ(kNullAddress, top_on_previous_step_);
+  }
+}
+
 void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
-  AdvanceAllocationObservers();
+  InlineAllocationStep(top(), top(), kNullAddress, 0);
   Space::AddAllocationObserver(observer);
-  UpdateInlineAllocationLimit(0);
+  DCHECK_IMPLIES(top_on_previous_step_, allocation_counter_.IsActive());
 }
 
 void SpaceWithLinearArea::RemoveAllocationObserver(
     AllocationObserver* observer) {
-  AdvanceAllocationObservers();
+  Address top_for_next_step =
+      allocation_counter_.NumberAllocationObservers() == 1 ? kNullAddress
+                                                           : top();
+  InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
   Space::RemoveAllocationObserver(observer);
-  UpdateInlineAllocationLimit(0);
+  DCHECK_IMPLIES(top_on_previous_step_, allocation_counter_.IsActive());
 }
 
 void SpaceWithLinearArea::PauseAllocationObservers() {
-  AdvanceAllocationObservers();
+  // Do a step to account for memory allocated so far.
+  InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
   Space::PauseAllocationObservers();
-  UpdateInlineAllocationLimit(0);
+  DCHECK_EQ(kNullAddress, top_on_previous_step_);
 }
 
 void SpaceWithLinearArea::ResumeAllocationObservers() {
+  DCHECK_EQ(kNullAddress, top_on_previous_step_);
   Space::ResumeAllocationObservers();
-  allocation_info_.MoveStartToTop();
-  UpdateInlineAllocationLimit(0);
+  StartNextInlineAllocationStep();
 }
 
-void SpaceWithLinearArea::AdvanceAllocationObservers() {
-  if (allocation_info_.top()) {
-    allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
-                                                   allocation_info_.start());
-    allocation_info_.MoveStartToTop();
-  }
-}
-
-// Perform an allocation step when the step is reached. size_in_bytes is the
-// actual size needed for the object (required for InvokeAllocationObservers).
-// aligned_size_in_bytes is the size of the object including the filler right
-// before it to reach the right alignment (required to DCHECK the start of the
-// object). allocation_size is the size of the actual allocation which needs to
-// be used for the accounting. It can be different from aligned_size_in_bytes in
-// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
-// able to align the allocation afterwards.
-void SpaceWithLinearArea::InvokeAllocationObservers(
-    Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes,
-    size_t allocation_size) {
-  DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
-  DCHECK_LE(aligned_size_in_bytes, allocation_size);
-  DCHECK(size_in_bytes == aligned_size_in_bytes ||
-         aligned_size_in_bytes == allocation_size);
-
-  if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;
-
-  if (allocation_size >= allocation_counter_.NextBytes()) {
-    // Only the first object in a LAB should reach the next step.
-    DCHECK_EQ(soon_object,
-              allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);
-
-    // Ensure that there is a valid object
-    heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size_in_bytes),
-                                ClearRecordedSlots::kNo);
-
-    // Run AllocationObserver::Step through the AllocationCounter.
-    allocation_counter_.InvokeAllocationObservers(soon_object, size_in_bytes,
-                                                  allocation_size);
-  }
-
-  DCHECK_LT(allocation_info_.limit() - allocation_info_.start(),
-            allocation_counter_.NextBytes());
+void SpaceWithLinearArea::InlineAllocationStep(Address top,
+                                               Address top_for_next_step,
+                                               Address soon_object,
+                                               size_t size) {
+  if (heap()->allocation_step_in_progress()) {
+    // Avoid starting a new step if we are mid-way through an existing one.
+    return;
+  }
+
+  if (top_on_previous_step_) {
+    if (top < top_on_previous_step_) {
+      // Generated code decreased the top pointer to do folded allocations.
+      DCHECK_NE(top, kNullAddress);
+      DCHECK_EQ(Page::FromAllocationAreaAddress(top),
+                Page::FromAllocationAreaAddress(top_on_previous_step_));
+      top_on_previous_step_ = top;
+    }
+    int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
+    AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
+    top_on_previous_step_ = top_for_next_step;
+  }
 }
 
 int MemoryChunk::FreeListsLength() {
   int length = 0;
   for (int cat = kFirstCategory; cat <= owner()->free_list()->last_category();
...
@@ -112,6 +112,7 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
  public:
   Space(Heap* heap, AllocationSpace id, FreeList* free_list)
       : BaseSpace(heap, id),
+        allocation_counter_(heap),
         free_list_(std::unique_ptr<FreeList>(free_list)) {
     external_backing_store_bytes_ =
         new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
@@ -138,6 +139,13 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
   virtual void StartNextInlineAllocationStep() {}
 
+  void AllocationStep(int bytes_since_last, Address soon_object, int size);
+
+  // An AllocationStep equivalent to be called after merging a contiguous
+  // chunk of an off-thread space into this space. The chunk is treated as a
+  // single allocation-folding group.
+  void AllocationStepAfterMerge(Address first_object_in_chunk, int size);
+
   // Returns size of objects. Can differ from the allocated size
   // (e.g. see OldLargeObjectSpace).
   virtual size_t SizeOfObjects() { return Size(); }
@@ -373,8 +381,6 @@ class LinearAllocationArea {
     set_limit(limit);
   }
 
-  void MoveStartToTop() { start_ = top_; }
-
   V8_INLINE Address start() const { return start_; }
 
   V8_INLINE void set_top(Address top) {
@@ -484,11 +490,11 @@ class LocalAllocationBuffer {
 class SpaceWithLinearArea : public Space {
  public:
   SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
-      : Space(heap, id, free_list) {
+      : Space(heap, id, free_list), top_on_previous_step_(0) {
     allocation_info_.Reset(kNullAddress, kNullAddress);
   }
 
-  virtual bool SupportsAllocationObserver() = 0;
+  virtual bool SupportsInlineAllocation() = 0;
 
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top(); }
@@ -502,7 +508,6 @@ class SpaceWithLinearArea : public Space {
     return allocation_info_.limit_address();
   }
 
-  // Methods needed for allocation observers.
   V8_EXPORT_PRIVATE void AddAllocationObserver(
       AllocationObserver* observer) override;
   V8_EXPORT_PRIVATE void RemoveAllocationObserver(
@@ -510,12 +515,6 @@ class SpaceWithLinearArea : public Space {
   V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
   V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
 
-  V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
-  V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
-                                                   size_t size_in_bytes,
-                                                   size_t aligned_size_in_bytes,
-                                                   size_t allocation_size);
-
   // When allocation observers are active we may use a lower limit to allow the
   // observers to 'interrupt' earlier than the natural limit. Given a linear
   // area bounded by [start, end), this function computes the limit to use to
@@ -530,8 +529,22 @@ class SpaceWithLinearArea : public Space {
   void PrintAllocationsOrigins();
 
  protected:
+  // If we are doing inline allocation in steps, this method performs the 'step'
+  // operation. top is the memory address of the bump pointer at the last
+  // inline allocation (i.e. it determines the numbers of bytes actually
+  // allocated since the last step.) top_for_next_step is the address of the
+  // bump pointer where the next byte is going to be allocated from. top and
+  // top_for_next_step may be different when we cross a page boundary or reset
+  // the space.
+  // TODO(ofrobots): clarify the precise difference between this and
+  // Space::AllocationStep.
+  void InlineAllocationStep(Address top, Address top_for_next_step,
+                            Address soon_object, size_t size);
+
+  V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
+
   // TODO(ofrobots): make these private after refactoring is complete.
   LinearAllocationArea allocation_info_;
+  Address top_on_previous_step_;
 
   size_t allocations_origins_[static_cast<int>(
       AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
...
@@ -232,7 +232,6 @@ v8_source_set("unittests_sources") {
     "diagnostics/eh-frame-iterator-unittest.cc",
     "diagnostics/eh-frame-writer-unittest.cc",
     "execution/microtask-queue-unittest.cc",
-    "heap/allocation-observer-unittest.cc",
    "heap/barrier-unittest.cc",
    "heap/bitmap-test-utils.h",
    "heap/bitmap-unittest.cc",
...
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/allocation-observer.h"
#include "src/base/logging.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
namespace {
class UnusedObserver : public AllocationObserver {
public:
explicit UnusedObserver(size_t step_size) : AllocationObserver(step_size) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(false);
}
};
} // namespace
TEST(AllocationObserverTest, AddAndRemoveUnusedObservers) {
AllocationCounter counter;
CHECK(!counter.IsActive());
UnusedObserver observer100(100);
UnusedObserver observer200(200);
counter.AddAllocationObserver(&observer200);
CHECK_EQ(counter.NextBytes(), 200);
counter.AddAllocationObserver(&observer100);
CHECK_EQ(counter.NextBytes(), 100);
counter.AdvanceAllocationObservers(90);
CHECK_EQ(counter.NextBytes(), 10);
counter.RemoveAllocationObserver(&observer100);
CHECK_EQ(counter.NextBytes(), 110);
counter.RemoveAllocationObserver(&observer200);
CHECK(!counter.IsActive());
}
namespace {
class VerifyStepObserver : public AllocationObserver {
public:
explicit VerifyStepObserver(size_t step_size)
: AllocationObserver(step_size) {}
void Step(int bytes_allocated, Address soon_object, size_t size) override {
CHECK(!do_not_invoke_);
invocations_++;
CHECK_EQ(expected_bytes_allocated_, bytes_allocated);
CHECK_EQ(expected_size_, size);
}
void ExpectNoInvocation() { do_not_invoke_ = true; }
void Expect(int expected_bytes_allocated, size_t expected_size) {
do_not_invoke_ = false;
expected_bytes_allocated_ = expected_bytes_allocated;
expected_size_ = expected_size;
}
int Invocations() { return invocations_; }
private:
bool do_not_invoke_ = false;
int invocations_ = 0;
int expected_bytes_allocated_ = 0;
size_t expected_size_ = 0;
};
} // namespace
TEST(AllocationObserverTest, Step) {
AllocationCounter counter;
CHECK(!counter.IsActive());
const Address kSomeObjectAddress = 8;
VerifyStepObserver observer100(100);
VerifyStepObserver observer200(200);
counter.AddAllocationObserver(&observer100);
counter.AddAllocationObserver(&observer200);
observer100.Expect(90, 8);
observer200.ExpectNoInvocation();
counter.AdvanceAllocationObservers(90);
counter.InvokeAllocationObservers(kSomeObjectAddress, 8, 10);
CHECK_EQ(observer100.Invocations(), 1);
CHECK_EQ(observer200.Invocations(), 0);
CHECK_EQ(counter.NextBytes(),
10 /* aligned_object_size */ + 100 /* smallest step size*/);
observer100.Expect(90, 16);
observer200.Expect(180, 16);
counter.AdvanceAllocationObservers(90);
counter.InvokeAllocationObservers(kSomeObjectAddress, 16, 20);
CHECK_EQ(observer100.Invocations(), 2);
CHECK_EQ(observer200.Invocations(), 1);
CHECK_EQ(counter.NextBytes(),
20 /* aligned_object_size */ + 100 /* smallest step size*/);
}
} // namespace internal
} // namespace v8
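
For reference, a minimal sketch of how such observers are attached in practice, using the Heap entry points visible in the diff above rather than a bare counter (the wrapper function is illustrative only):

    // Sketch: register an observer pair on all spaces, then detach it again.
    void AttachObservers(Heap* heap, AllocationObserver* observer,
                         AllocationObserver* new_space_observer) {
      heap->AddAllocationObserversToAllSpaces(observer, new_space_observer);
      // ... allocations now trigger Step() roughly every step_size bytes ...
      heap->RemoveAllocationObserversFromAllSpaces(observer, new_space_observer);
    }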