Commit 2b98251c authored by Michael Lippautz, committed by V8 LUCI CQ

heap: Move disable-new state to corresponding spaces

The information was previously kept heap-global but is really only
used by spaces when refilling their LABs.

Bug: v8:12615
Change-Id: Iee256d35ffa0112c93ec721bc3afdc2881c4743b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3465898
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79122}
parent bdbc1d65
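At a glance, the refactoring replaces one heap-global flag with a per-space flag that each space consults when refilling its linear allocation buffer (LAB). The following standalone sketch models that move; HeapBefore, SpaceAfter, and HeapAfter are invented names for illustration, not V8's real classes.

#include <utility>
#include <vector>

// Before: a single heap-global flag that every space had to query.
struct HeapBefore {
  bool inline_allocation_disabled = false;
};

// After: each space owning a LAB keeps the state itself ...
class SpaceAfter {
 public:
  void DisableInlineAllocation() { use_lab_ = false; }
  void EnableInlineAllocation() { use_lab_ = true; }
  bool IsInlineAllocationEnabled() const { return use_lab_; }

 private:
  bool use_lab_ = true;  // per-space state, mirroring the patch below
};

// ... and the heap only fans requests out to its spaces.
class HeapAfter {
 public:
  explicit HeapAfter(std::vector<SpaceAfter*> spaces)
      : spaces_(std::move(spaces)) {}
  void DisableInlineAllocation() {
    for (SpaceAfter* space : spaces_) space->DisableInlineAllocation();
  }
  void EnableInlineAllocation() {
    for (SpaceAfter* space : spaces_) space->EnableInlineAllocation();
  }

 private:
  std::vector<SpaceAfter*> spaces_;
};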
@@ -5595,32 +5595,31 @@ bool Heap::ShouldStressCompaction() const {
 }
 
 void Heap::EnableInlineAllocation() {
-  if (!inline_allocation_disabled_) return;
-  inline_allocation_disabled_ = false;
-
   // Update inline allocation limit for new space.
   if (new_space()) {
-    new_space()->AdvanceAllocationObservers();
-    new_space()->UpdateInlineAllocationLimit(0);
+    new_space()->EnableInlineAllocation();
+  }
+  // Update inline allocation limit for old spaces.
+  PagedSpaceIterator spaces(this);
+  for (PagedSpace* space = spaces.Next(); space != nullptr;
+       space = spaces.Next()) {
+    base::MutexGuard guard(space->mutex());
+    space->EnableInlineAllocation();
   }
 }
 
 void Heap::DisableInlineAllocation() {
-  if (inline_allocation_disabled_) return;
-  inline_allocation_disabled_ = true;
-
   // Update inline allocation limit for new space.
   if (new_space()) {
-    new_space()->UpdateInlineAllocationLimit(0);
+    new_space()->DisableInlineAllocation();
   }
 
   // Update inline allocation limit for old spaces.
   PagedSpaceIterator spaces(this);
   CodePageCollectionMemoryModificationScope modification_scope(this);
   for (PagedSpace* space = spaces.Next(); space != nullptr;
        space = spaces.Next()) {
     base::MutexGuard guard(space->mutex());
-    space->FreeLinearAllocationArea();
+    space->DisableInlineAllocation();
   }
 }
...
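In the hunk above, Heap::EnableInlineAllocation and Heap::DisableInlineAllocation no longer mutate any heap-level state; they only delegate, taking each paged space's mutex before flipping its flag. A simplified model of that locking fan-out, with std::mutex standing in for base::MutexGuard and a plain vector for PagedSpaceIterator (both stand-ins are assumptions of this sketch):

#include <mutex>
#include <vector>

class PagedSpaceModel {
 public:
  std::mutex& mutex() { return mutex_; }
  // In the real patch this also frees the space's linear allocation area.
  void DisableInlineAllocation() { use_lab_ = false; }

 private:
  std::mutex mutex_;
  bool use_lab_ = true;
};

void DisableInlineAllocationForAll(std::vector<PagedSpaceModel*>& spaces) {
  for (PagedSpaceModel* space : spaces) {
    // Lock per space: the flag now lives next to the data it guards,
    // so no heap-wide synchronization is needed.
    std::lock_guard<std::mutex> guard(space->mutex());
    space->DisableInlineAllocation();
  }
}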
@@ -957,9 +957,6 @@ class Heap {
   // Inline allocation. ========================================================
   // ===========================================================================
 
-  // Indicates whether inline bump-pointer allocation has been disabled.
-  bool inline_allocation_disabled() { return inline_allocation_disabled_; }
-
   // Switch whether inline bump-pointer allocation should be used.
   V8_EXPORT_PRIVATE void EnableInlineAllocation();
   V8_EXPORT_PRIVATE void DisableInlineAllocation();
@@ -2279,10 +2276,6 @@ class Heap {
   std::atomic<size_t> old_generation_allocation_limit_{0};
   size_t global_allocation_limit_ = 0;
 
-  // Indicates that inline bump-pointer allocation has been globally disabled
-  // for all spaces. This is used to disable allocations in generated code.
-  bool inline_allocation_disabled_ = false;
-
   // Weak list heads, threaded through the objects.
   // List heads are initialized lazily and contain the undefined_value at start.
   // {native_contexts_list_} is an Address instead of an Object to allow the use
...
@@ -487,7 +487,7 @@ class V8_EXPORT_PRIVATE NewSpace
   void MakeLinearAllocationAreaIterable();
 
   // Creates a filler object in the linear allocation area and closes it.
-  void FreeLinearAllocationArea();
+  void FreeLinearAllocationArea() override;
 
  private:
   static const int kAllocationBufferParkingThreshold = 4 * KB;
...
@@ -194,7 +194,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   void ResetFreeList();
 
   // Empty space linear allocation area, returning unused area to free list.
-  void FreeLinearAllocationArea();
+  void FreeLinearAllocationArea() override;
 
   void MakeLinearAllocationAreaIterable();
...
@@ -259,10 +259,12 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
                                           size_t min_size) {
   DCHECK_GE(end - start, min_size);
 
-  if (heap()->inline_allocation_disabled()) {
-    // Fit the requested area exactly.
+  if (!use_lab_) {
+    // LABs are disabled, so we fit the requested area exactly.
     return start + min_size;
-  } else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
+  }
+
+  if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
     // Ensure there are no unaccounted allocations.
     DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@@ -277,10 +279,27 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
         static_cast<uint64_t>(start) + std::max(min_size, rounded_step);
     uint64_t new_end = std::min(step_end, static_cast<uint64_t>(end));
     return static_cast<Address>(new_end);
-  } else {
-    // The entire node can be used as the linear allocation area.
-    return end;
   }
+
+  // LABs are enabled and no observers attached. Return the whole node for the
+  // LAB.
+  return end;
+}
+
+void SpaceWithLinearArea::DisableInlineAllocation() {
+  if (!use_lab_) return;
+
+  use_lab_ = false;
+  FreeLinearAllocationArea();
+  UpdateInlineAllocationLimit(0);
+}
+
+void SpaceWithLinearArea::EnableInlineAllocation() {
+  if (use_lab_) return;
+
+  use_lab_ = true;
+  AdvanceAllocationObservers();
+  UpdateInlineAllocationLimit(0);
+}
 
 void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
...
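The rewritten ComputeLimit separates three cases: LAB disabled (hand out exactly min_size so every allocation takes the slow path), allocation observers active (stop early so observers can interrupt), and the default (use the whole free area as the LAB). A sketch of that decision, reduced to plain integers; Address arithmetic, observer bookkeeping, and the rounded-step computation are simplified away:

#include <algorithm>
#include <cstdint>

uint64_t ComputeLimitSketch(uint64_t start, uint64_t end, uint64_t min_size,
                            bool use_lab, bool observers_active,
                            uint64_t step) {
  // Case 1: LAB disabled -- fit the request exactly.
  if (!use_lab) return start + min_size;
  // Case 2: observers active -- stop at the step, but never below
  // min_size and never past the end of the free area.
  if (observers_active) {
    return std::min(start + std::max(min_size, step), end);
  }
  // Case 3: the entire free area becomes the LAB.
  return end;
}

For example, with start = 0, end = 4096, and min_size = 32, the sketch returns 32 when use_lab is false, and 4096 when LABs are on and no observers are active.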
@@ -474,6 +474,7 @@ class SpaceWithLinearArea : public Space
       size_t allocation_size);
 
   void MarkLabStartInitialized();
+  virtual void FreeLinearAllocationArea() = 0;
 
   // When allocation observers are active we may use a lower limit to allow the
   // observers to 'interrupt' earlier than the natural limit. Given a linear
@@ -484,13 +485,17 @@ class SpaceWithLinearArea : public Space
   V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
       size_t min_size) = 0;
 
-  V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+  void DisableInlineAllocation();
+  void EnableInlineAllocation();
+  bool IsInlineAllocationEnabled() const { return use_lab_; }
 
   void PrintAllocationsOrigins();
 
  protected:
-  // TODO(ofrobots): make these private after refactoring is complete.
+  V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+
   LinearAllocationArea* const allocation_info_;
+  bool use_lab_ = true;
 
   size_t allocations_origins_[static_cast<int>(
       AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
...
@@ -774,7 +774,7 @@ void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
   // We cannot rely on `space->limit()` to point to the end of the current page
   // in the case where inline allocations are disabled, it actually points to
   // the current allocation pointer.
-  DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
+  DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
                  space->limit() == space->top());
   int space_remaining =
       static_cast<int>(space->to_space().page_high() - space->top());
...
@@ -140,7 +140,7 @@ bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
   // We cannot rely on `space->limit()` to point to the end of the current page
   // in the case where inline allocations are disabled, it actually points to
   // the current allocation pointer.
-  DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
+  DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
                  space->limit() == space->top());
   int space_remaining =
       static_cast<int>(space->to_space().page_high() - space->top());
...
@@ -3000,7 +3000,7 @@ TEST(TrackBumpPointerAllocations) {
   // Now check that not all allocations are tracked if we manually reenable
   // inline allocations.
-  CHECK(CcTest::heap()->inline_allocation_disabled());
+  CHECK(!CcTest::heap()->new_space()->IsInlineAllocationEnabled());
   CcTest::heap()->EnableInlineAllocation();
   CompileRun(inline_heap_allocation_source);
...