Commit a04bc3d2 authored by Michael Lippautz, committed by V8 LUCI CQ

[heap] Refactor LinearAllocationArea

Avoid having callers operate on the raw top/limit pointers where possible
and add verification of the main invariant (start <= top <= limit).

This is related to the refactoring suggested in v8:11958, in that it
cleans up the call sites a bit but doesn't go further than that.

Bug: v8:11958
Change-Id: I35de29a5cd505b375408fc7c5399f637f3e9c755
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3034741
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75766}
parent 5af79486
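
For orientation before the diff: allocation fast paths used to read and bump the raw top/limit words of the allocation area themselves; after this change they go through LinearAllocationArea methods, which can check the start <= top <= limit invariant in one place. A minimal standalone sketch of the before/after call-site shape, using uintptr_t in place of V8's Address and a simplified area type rather than the real class:

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;  // stand-in for v8::internal::Address

struct Area {  // simplified model of a linear allocation area
  Address start = 0, top = 0, limit = 0;
  bool CanIncrementTop(size_t bytes) const { return top + bytes <= limit; }
  Address IncrementTop(size_t bytes) {
    Address old_top = top;
    top += bytes;
    assert(start <= top && top <= limit);  // invariant checked internally
    return old_top;
  }
};

// Before: the caller manipulates the raw words and nothing verifies them.
Address AllocateOld(Area& lab, size_t bytes) {
  Address top = lab.top;
  if (lab.limit < top + bytes) return 0;  // retry path
  lab.top = top + bytes;
  return top;
}

// After: the caller asks the area, which owns and verifies its invariant.
Address AllocateNew(Area& lab, size_t bytes) {
  if (!lab.CanIncrementTop(bytes)) return 0;  // retry path
  return lab.IncrementTop(bytes);
}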
......@@ -1254,6 +1254,7 @@ filegroup(
"src/heap/invalidated-slots.h",
"src/heap/large-spaces.cc",
"src/heap/large-spaces.h",
"src/heap/linear-allocation-area.h",
"src/heap/list.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
......
......@@ -2722,6 +2722,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/invalidated-slots-inl.h",
"src/heap/invalidated-slots.h",
"src/heap/large-spaces.h",
"src/heap/linear-allocation-area.h",
"src/heap/list.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
......
......@@ -3461,13 +3461,9 @@ void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
void Heap::UndoLastAllocationAt(Address addr, int size) {
DCHECK_LE(0, size);
if (size == 0) return;
if (code_space_->Contains(addr)) {
-    Address* top = code_space_->allocation_top_address();
-    if (addr + size == *top && code_space_->original_top() <= addr) {
-      *top = addr;
+    if (code_space_->TryFreeLast(addr, size)) {
return;
}
}
CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
}
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_LINEAR_ALLOCATION_AREA_H_
#define V8_HEAP_LINEAR_ALLOCATION_AREA_H_
#include "include/v8-internal.h"
#include "src/common/checks.h"
namespace v8 {
namespace internal {
// A linear allocation area to allocate objects from.
//
// Invariant that must hold at all times:
// start <= top <= limit
class LinearAllocationArea final {
public:
LinearAllocationArea() = default;
LinearAllocationArea(Address top, Address limit)
: start_(top), top_(top), limit_(limit) {
Verify();
}
void Reset(Address top, Address limit) {
start_ = top;
top_ = top;
limit_ = limit;
Verify();
}
void ResetStart() { start_ = top_; }
V8_INLINE bool CanIncrementTop(size_t bytes) {
Verify();
return (top_ + bytes) <= limit_;
}
V8_INLINE Address IncrementTop(size_t bytes) {
Address old_top = top_;
top_ += bytes;
Verify();
return old_top;
}
V8_INLINE bool DecrementTopIfAdjacent(Address new_top, size_t bytes) {
Verify();
if ((new_top + bytes) == top_) {
top_ = new_top;
if (start_ > top_) {
ResetStart();
}
Verify();
return true;
}
return false;
}
V8_INLINE bool MergeIfAdjacent(LinearAllocationArea& other) {
Verify();
other.Verify();
if (top_ == other.limit_) {
top_ = other.top_;
start_ = other.start_;
other.Reset(kNullAddress, kNullAddress);
Verify();
return true;
}
return false;
}
V8_INLINE void SetLimit(Address limit) {
limit_ = limit;
Verify();
}
V8_INLINE Address start() const {
Verify();
return start_;
}
V8_INLINE Address top() const {
Verify();
return top_;
}
V8_INLINE Address limit() const {
Verify();
return limit_;
}
const Address* top_address() const { return &top_; }
Address* top_address() { return &top_; }
const Address* limit_address() const { return &limit_; }
Address* limit_address() { return &limit_; }
void Verify() const {
#ifdef DEBUG
SLOW_DCHECK(start_ <= top_);
SLOW_DCHECK(top_ <= limit_);
SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
#endif // DEBUG
}
private:
// The start of the LAB. Initially coincides with `top_`. As top is moved
// ahead, the area [start_, top_[ denotes a range of new objects. This range
// is reset with `ResetStart()`.
Address start_ = kNullAddress;
// The top of the LAB that is used for allocation.
Address top_ = kNullAddress;
// Limit of the LAB that denotes the end of the valid range for allocation.
Address limit_ = kNullAddress;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_LINEAR_ALLOCATION_AREA_H_
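
A short usage sketch of the class above, exercising the bump-pointer fast path, the undo helper, and merging. Illustrative only: it assumes compilation inside the V8 tree so that the header resolves, and the base address is made up.

#include "src/heap/linear-allocation-area.h"

namespace v8 {
namespace internal {

void LinearAllocationAreaExample() {
  const Address base = 0x10000;  // hypothetical, tag-aligned
  LinearAllocationArea lab(base, base + 1024);

  // Fast path: check that the bump fits, then bump; the returned old top is
  // the address of the newly allocated object.
  if (lab.CanIncrementTop(64)) {
    Address obj = lab.IncrementTop(64);  // obj == base, lab.top() == base + 64

    // Undo works only for the most recently bumped range.
    bool undone = lab.DecrementTopIfAdjacent(obj, 64);  // true, top back at base
    if (undone) lab.IncrementTop(64);                   // redo for the merge demo
  }

  // A handed-out LAB whose limit coincides with lab's top can be folded back:
  // lab's top moves down to the unused part of that LAB.
  LinearAllocationArea unused(base + 32, base + 64);
  bool merged = lab.MergeIfAdjacent(unused);  // true; lab.top() == base + 32
  (void)merged;
}

}  // namespace internal
}  // namespace v8

Both DecrementTopIfAdjacent and MergeIfAdjacent preserve start <= top <= limit, which Verify() re-checks in debug builds.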
......@@ -56,7 +56,8 @@ void EvacuationAllocator::FreeLastInNewSpace(HeapObject object,
void EvacuationAllocator::FreeLastInOldSpace(HeapObject object,
int object_size) {
-  if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
+  if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object.address(),
+                                                      object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object.address(), object_size,
ClearRecordedSlots::kNo);
......
......@@ -111,13 +111,11 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
-  Address top = allocation_info_.top();
-  if (allocation_info_.limit() < top + size_in_bytes) {
+  if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
return AllocationResult::Retry(NEW_SPACE);
}
-  HeapObject obj = HeapObject::FromAddress(top);
-  allocation_info_.set_top(top + size_in_bytes);
+  HeapObject obj =
+      HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes));
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
......@@ -136,13 +134,11 @@ AllocationResult NewSpace::AllocateFastAligned(
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
-  if (allocation_info_.limit() - top <
-      static_cast<uintptr_t>(aligned_size_in_bytes)) {
+  if (!allocation_info_.CanIncrementTop(aligned_size_in_bytes)) {
return AllocationResult::Retry(NEW_SPACE);
}
-  HeapObject obj = HeapObject::FromAddress(top);
-  allocation_info_.set_top(top + aligned_size_in_bytes);
+  HeapObject obj = HeapObject::FromAddress(
+      allocation_info_.IncrementTop(aligned_size_in_bytes));
if (result_aligned_size_in_bytes)
*result_aligned_size_in_bytes = aligned_size_in_bytes;
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
......
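
The aligned fast paths above now reserve filler_size + size_in_bytes in a single bump, with the object starting just past the filler. A simplified standalone model of that arithmetic, assuming a generic power-of-two alignment rather than V8's actual Heap::GetFillToAlign rules:

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;  // stand-in for v8::internal::Address

// Bytes of filler needed so that (top + filler) is alignment-aligned.
size_t FillToAlign(Address top, size_t alignment) {
  return (alignment - (top & (alignment - 1))) & (alignment - 1);
}

// Aligned bump allocation over [top, limit): reserve filler + size in one
// increment; the object starts right after the filler gap.
Address AllocateAligned(Address& top, Address limit, size_t size,
                        size_t alignment) {
  size_t filler = FillToAlign(top, alignment);
  size_t aligned_size = filler + size;
  if (limit - top < aligned_size) return 0;  // retry path
  Address object = top + filler;
  top += aligned_size;
  assert((object & (alignment - 1)) == 0);
  return object;
}

In the real code, a non-empty gap is additionally turned into a filler object via Heap::PrecedeWithFiller so that the heap stays iterable.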
......@@ -500,7 +500,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
DCHECK_LE(top(), new_limit);
DCHECK_LE(new_limit, to_space_.page_high());
-  allocation_info_.set_limit(new_limit);
+  allocation_info_.SetLimit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
......@@ -596,11 +596,8 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
-  if (info.limit() != kNullAddress && info.limit() == top()) {
-    DCHECK_NE(info.top(), kNullAddress);
-    allocation_info_.set_top(info.top());
-    allocation_info_.MoveStartToTop();
-    original_top_.store(info.top(), std::memory_order_release);
+  if (allocation_info_.MergeIfAdjacent(info)) {
+    original_top_.store(allocation_info_.top(), std::memory_order_release);
}
#if DEBUG
......@@ -675,8 +672,9 @@ void NewSpace::VerifyTop() {
DCHECK_LE(allocation_info_.start(), allocation_info_.top());
DCHECK_LE(allocation_info_.top(), allocation_info_.limit());
-  // Ensure that original_top_ always equals LAB start.
-  DCHECK_EQ(original_top_, allocation_info_.start());
+  // Ensure that original_top_ always >= LAB start. The delta between start_
+  // and top_ is still to be processed by allocation observers.
+  DCHECK_GE(original_top_, allocation_info_.start());
// Ensure that limit() is <= original_limit_, original_limit_ always needs
// to be end of current to space page.
......
......@@ -78,13 +78,9 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
return added;
}
-bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
+bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
if (allocation_info_.top() != kNullAddress) {
-    const Address object_address = object.address();
-    if ((allocation_info_.top() - object_size) == object_address) {
-      allocation_info_.set_top(object_address);
-      return true;
-    }
+    return allocation_info_.DecrementTopIfAdjacent(object_address, object_size);
}
return false;
}
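
TryFreeLast now takes a raw address and simply delegates to DecrementTopIfAdjacent, so only the most recently bumped range can be handed back. A hypothetical helper mirroring that call-site pattern, assuming compilation inside the V8 tree (the function name is made up):

#include "src/heap/linear-allocation-area.h"

namespace v8 {
namespace internal {

// Returns true and moves top back if [addr, addr + size) was the last bump.
bool TryUndoLastAllocation(LinearAllocationArea& lab, Address addr, int size) {
  if (lab.top() == kNullAddress) return false;
  return lab.DecrementTopIfAdjacent(addr, static_cast<size_t>(size));
}

}  // namespace internal
}  // namespace v8

When this fails, the callers in the diff (Heap::UndoLastAllocationAt and the evacuation allocator) instead write a filler object over the range.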
......@@ -97,14 +93,11 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
}
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
-  Address current_top = allocation_info_.top();
-  Address new_top = current_top + size_in_bytes;
-  if (new_top > allocation_info_.limit())
+  if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
return AllocationResult::Retry(identity());
-  DCHECK_LE(new_top, allocation_info_.limit());
-  allocation_info_.set_top(new_top);
-  return AllocationResult(HeapObject::FromAddress(current_top));
+  }
+  return AllocationResult(
+      HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes)));
}
AllocationResult PagedSpace::AllocateFastAligned(
......@@ -112,20 +105,17 @@ AllocationResult PagedSpace::AllocateFastAligned(
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
-  Address new_top = current_top + filler_size + size_in_bytes;
-  if (new_top > allocation_info_.limit())
+  int aligned_size = filler_size + size_in_bytes;
+  if (!allocation_info_.CanIncrementTop(aligned_size)) {
return AllocationResult::Retry(identity());
-  allocation_info_.set_top(new_top);
-  if (aligned_size_in_bytes)
-    *aligned_size_in_bytes = filler_size + size_in_bytes;
+  }
+  HeapObject obj =
+      HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
+  if (aligned_size_in_bytes) *aligned_size_in_bytes = aligned_size;
if (filler_size > 0) {
-    Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
-                            HeapObject::FromAddress(current_top), filler_size);
+    obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
}
-  return AllocationResult(HeapObject::FromAddress(current_top + filler_size));
+  return AllocationResult(obj);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
......
......@@ -176,7 +176,7 @@ class V8_EXPORT_PRIVATE PagedSpace
return size_in_bytes - wasted;
}
-  inline bool TryFreeLast(HeapObject object, int object_size);
+  inline bool TryFreeLast(Address object_address, int object_size);
void ResetFreeList();
......
......@@ -138,19 +138,17 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
-  Address new_top = current_top + filler_size + size_in_bytes;
-  if (new_top > allocation_info_.limit())
+  int aligned_size = filler_size + size_in_bytes;
+  if (!allocation_info_.CanIncrementTop(aligned_size)) {
return AllocationResult::Retry(NEW_SPACE);
-  allocation_info_.set_top(new_top);
+  }
+  HeapObject object =
+      HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
if (filler_size > 0) {
-    return Heap::PrecedeWithFiller(ReadOnlyRoots(heap_),
-                                   HeapObject::FromAddress(current_top),
-                                   filler_size);
+    return Heap::PrecedeWithFiller(ReadOnlyRoots(heap_), object, filler_size);
}
-  return AllocationResult(HeapObject::FromAddress(current_top));
+  return AllocationResult(object);
}
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
......@@ -165,23 +163,14 @@ LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
return LocalAllocationBuffer(heap, LinearAllocationArea(top, top + size));
}
bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
-  if (allocation_info_.top() == other->allocation_info_.limit()) {
-    allocation_info_.set_top(other->allocation_info_.top());
-    other->allocation_info_.Reset(kNullAddress, kNullAddress);
-    return true;
-  }
-  return false;
+  return allocation_info_.MergeIfAdjacent(other->allocation_info_);
}
bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
if (IsValid()) {
const Address object_address = object.address();
-    if ((allocation_info_.top() - object_size) == object_address) {
-      allocation_info_.set_top(object_address);
-      return true;
-    }
+    return allocation_info_.DecrementTopIfAdjacent(object_address, object_size);
}
return false;
}
......
......@@ -385,7 +385,7 @@ void SpaceWithLinearArea::AdvanceAllocationObservers() {
}
void SpaceWithLinearArea::MarkLabStartInitialized() {
-  allocation_info_.MoveStartToTop();
+  allocation_info_.ResetStart();
if (identity() == NEW_SPACE) {
heap()->new_space()->MoveOriginalTopForward();
......
......@@ -17,6 +17,7 @@
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
#include "src/heap/linear-allocation-area.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/objects.h"
......@@ -37,7 +38,6 @@ class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
-class LinearAllocationArea;
class Page;
class PagedSpace;
class SemiSpace;
......@@ -366,61 +366,6 @@ class PageRange {
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
-// An abstraction of allocation and relocation pointers in a page-structured
-// space.
-class LinearAllocationArea {
- public:
-  LinearAllocationArea()
-      : start_(kNullAddress), top_(kNullAddress), limit_(kNullAddress) {}
-  LinearAllocationArea(Address top, Address limit)
-      : start_(top), top_(top), limit_(limit) {}
-
-  void Reset(Address top, Address limit) {
-    start_ = top;
-    set_top(top);
-    set_limit(limit);
-  }
-
-  void MoveStartToTop() { start_ = top_; }
-
-  V8_INLINE Address start() const { return start_; }
-
-  V8_INLINE void set_top(Address top) {
-    SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
-    top_ = top;
-  }
-
-  V8_INLINE Address top() const {
-    SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
-    return top_;
-  }
-
-  Address* top_address() { return &top_; }
-
-  V8_INLINE void set_limit(Address limit) { limit_ = limit; }
-
-  V8_INLINE Address limit() const { return limit_; }
-
-  Address* limit_address() { return &limit_; }
-
-#ifdef DEBUG
-  bool VerifyPagedAllocation() {
-    return (Page::FromAllocationAreaAddress(top_) ==
-            Page::FromAllocationAreaAddress(limit_)) &&
-           (top_ <= limit_);
-  }
-#endif
-
- private:
-  // Current allocation top.
-  Address start_;
-  // Current allocation top.
-  Address top_;
-  // Current allocation limit.
-  Address limit_;
-};
// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
......