Commit 1a4294b1 authored by machenbach, committed by Commit bot

Revert of [heap] Refactor heap object iteration (patchset #6 id:100001 of https://codereview.chromium.org/2516303006/ )

Reason for revert:
Breaks msan:
https://build.chromium.org/p/client.v8/builders/V8%20Linux%20-%20arm64%20-%20sim%20-%20MSAN/builds/12103

Original issue's description:
> [heap] Refactor heap object iteration
>
> BUG=
>
> Committed: https://crrev.com/d094fa76f3dd0bb5fc3f4c669762aa97d88e0f76
> Cr-Commit-Position: refs/heads/master@{#41226}

TBR=hpayer@chromium.org,mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review-Url: https://codereview.chromium.org/2529663002
Cr-Commit-Position: refs/heads/master@{#41230}
parent e4a15a7b
@@ -78,8 +78,8 @@ void LocalArrayBufferTracker::Process(Callback callback) {
 void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
   DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
-  for (Page* page : PageRange(heap->new_space()->FromSpaceStart(),
-                              heap->new_space()->FromSpaceEnd())) {
+  for (Page* page : NewSpacePageRange(heap->new_space()->FromSpaceStart(),
+                                      heap->new_space()->FromSpaceEnd())) {
     bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
     CHECK(empty);
   }
@@ -4710,8 +4710,8 @@ void Heap::Verify() {
 void Heap::ZapFromSpace() {
   if (!new_space_->IsFromSpaceCommitted()) return;
-  for (Page* page :
-       PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
+  for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
+                                      new_space_->FromSpaceEnd())) {
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -133,7 +133,7 @@ static void VerifyMarking(NewSpace* space) {
   // page->area_start() as start of range on all pages.
   CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
-  PageRange range(space->bottom(), end);
+  NewSpacePageRange range(space->bottom(), end);
   for (auto it = range.begin(); it != range.end();) {
     Page* page = *(it++);
     Address limit = it != range.end() ? page->area_end() : end;
@@ -185,21 +185,29 @@ class VerifyEvacuationVisitor : public ObjectVisitor {
 static void VerifyEvacuation(Page* page) {
   VerifyEvacuationVisitor visitor;
   HeapObjectIterator iterator(page);
-  for (HeapObject* heap_object = iterator.Next(); heap_object != nullptr;
+  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
        heap_object = iterator.Next()) {
-    CHECK(!heap_object->IsFiller());
-    heap_object->Iterate(&visitor);
+    // We skip free space objects.
+    if (!heap_object->IsFiller()) {
+      heap_object->Iterate(&visitor);
+    }
   }
 }
 
 static void VerifyEvacuation(NewSpace* space) {
   VerifyEvacuationVisitor visitor;
-  HeapObjectIterator iterator(space);
-  for (HeapObject* heap_object = iterator.Next(); heap_object != nullptr;
-       heap_object = iterator.Next()) {
-    CHECK(!heap_object->IsFiller());
-    heap_object->Iterate(&visitor);
+  NewSpacePageRange range(space->bottom(), space->top());
+  for (auto it = range.begin(); it != range.end();) {
+    Page* page = *(it++);
+    Address current = page->area_start();
+    Address limit = it != range.end() ? page->area_end() : space->top();
+    CHECK(limit == space->top() || !page->Contains(space->top()));
+    while (current < limit) {
+      HeapObject* object = HeapObject::FromAddress(current);
+      object->Iterate(&visitor);
+      current += object->Size();
+    }
   }
 }
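
The restored VerifyEvacuation(NewSpace*) above replaces an iterator-based loop with a manual page walk whose key subtlety is the limit computation: every page is scanned to area_end() except the last one, which is clamped to the space's allocation top, because no filler objects exist behind the linear allocation area. A minimal, self-contained sketch of that traversal pattern, using hypothetical SimplePage/SimpleObject stand-ins rather than V8's real Page and HeapObject types:

#include <cstddef>
#include <vector>

// Hypothetical stand-ins for V8's Page and HeapObject.
struct SimpleObject { size_t size; };  // each object knows its own size
struct SimplePage {
  std::vector<SimpleObject> objects;   // objects laid out in allocation order
  size_t area_size;                    // usable bytes on this page
};

// Visit every object, but only up to `top_offset` on the final page,
// mirroring `limit = it != range.end() ? page->area_end() : space->top()`.
template <typename Visitor>
void WalkObjectsUpToTop(const std::vector<SimplePage>& pages,
                        size_t top_offset, Visitor visit) {
  for (size_t i = 0; i < pages.size(); ++i) {
    const bool is_last = (i + 1 == pages.size());
    const size_t limit = is_last ? top_offset : pages[i].area_size;
    size_t current = 0;  // offset of the next object on this page
    for (const SimpleObject& obj : pages[i].objects) {
      if (current >= limit) break;  // stop at the clamp
      visit(obj);
      current += obj.size;          // advance by object size
    }
  }
}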
@@ -324,7 +332,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
-  for (Page* p : PageRange(space->bottom(), space->top())) {
+  for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -1956,7 +1964,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
   NewSpace* space = heap()->new_space();
-  for (Page* page : PageRange(space->bottom(), space->top())) {
+  for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -3023,7 +3031,7 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
 void MarkCompactCollector::EvacuateNewSpacePrologue() {
   NewSpace* new_space = heap()->new_space();
   // Append the list of new space pages to be processed.
-  for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
+  for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
     newspace_evacuation_candidates_.Add(p);
   }
   new_space->Flip();
@@ -3809,7 +3817,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
   Address space_start = heap->new_space()->bottom();
   Address space_end = heap->new_space()->top();
-  for (Page* page : PageRange(space_start, space_end)) {
+  for (Page* page : NewSpacePageRange(space_start, space_end)) {
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
@@ -28,14 +28,10 @@ PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
   return tmp;
 }
 
-PageRange::PageRange(Address start, Address limit)
-    : begin_(Page::FromAllocationAreaAddress(start)),
-      end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
-#ifdef DEBUG
-  if (begin_->InNewSpace()) {
-    SemiSpace::AssertValidRange(start, limit);
-  }
-#endif  // DEBUG
+NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
+    : range_(Page::FromAddress(start),
+             Page::FromAllocationAreaAddress(limit)->next_page()) {
+  SemiSpace::AssertValidRange(start, limit);
 }
 
 // -----------------------------------------------------------------------------
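
The constructor restored above maps a raw [start, limit) address range onto a half-open page range, and the asymmetry is deliberate: start always points at a real object, so Page::FromAddress suffices, while limit is an allocation top that may point one past a page's object area, hence Page::FromAllocationAreaAddress for the end page. A self-contained sketch of that construction, assuming hypothetical MiniPage/MiniPageRange types rather than V8's aligned-chunk lookup:

#include <cassert>
#include <cstdint>

// Hypothetical page whose object area is not contiguous with its neighbor's
// (V8 pages carry headers, so area_end never equals the next area_start).
struct MiniPage {
  uintptr_t area_start;  // first usable address
  uintptr_t area_end;    // one past the last usable address
  MiniPage* next;
};

// Allocation-area lookup: `addr == area_end` still belongs to this page,
// because an allocation top may point just past the last object.
MiniPage* FromAllocationAreaAddress(MiniPage* first, uintptr_t addr) {
  for (MiniPage* p = first; p != nullptr; p = p->next) {
    if (addr >= p->area_start && addr <= p->area_end) return p;
  }
  assert(false && "address not in any page");
  return nullptr;
}

// Object-address lookup: strictly inside the object area.
MiniPage* FromAddress(MiniPage* first, uintptr_t addr) {
  for (MiniPage* p = first; p != nullptr; p = p->next) {
    if (addr >= p->area_start && addr < p->area_end) return p;
  }
  assert(false && "address not in any page");
  return nullptr;
}

// Half-open page range [page(start), page(limit)->next), as in the diff.
struct MiniPageRange {
  MiniPage* begin;
  MiniPage* end;
  MiniPageRange(MiniPage* first, uintptr_t start, uintptr_t limit)
      : begin(FromAddress(first, start)),
        end(FromAllocationAreaAddress(first, limit)->next) {}
};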
@@ -72,14 +68,8 @@ HeapObject* HeapObjectIterator::Next() {
 HeapObject* HeapObjectIterator::FromCurrentPage() {
   while (cur_addr_ != cur_end_) {
-    // When the current address equals top we have to handle two scenarios:
-    // - Old space page: Move forward to limit if top != limit. We will find
-    //   a filler following limit.
-    // - New space page: We have to stop iteration before the linear allocation
-    //   area as there are no fillers behind it.
-    if (cur_addr_ == space_->top() &&
-        (cur_addr_ != space_->limit() || current_page()->InNewSpace())) {
-      cur_addr_ = current_page()->InNewSpace() ? cur_end_ : space_->limit();
+    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+      cur_addr_ = space_->limit();
       continue;
     }
     HeapObject* obj = HeapObject::FromAddress(cur_addr_);
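
The comment deleted above documents the invariant that makes this jump valid in the restored, paged-space-only iterator: addresses in [top, limit) are unallocated, and the space keeps a filler object directly behind limit, so scanning may hop from top to limit and continue. A minimal sketch of that skip over a hypothetical flat page model:

#include <cstdint>

// Hypothetical flat model of a paged-space page under iteration:
// [top, limit) is the not-yet-allocated linear allocation area.
struct ScanState {
  uintptr_t cur;    // current scan address
  uintptr_t top;    // allocation top: first free address
  uintptr_t limit;  // allocation limit: a filler object starts here
};

// One advancement step, mirroring HeapObjectIterator::FromCurrentPage():
// jump over the unallocated gap instead of reading uninitialized memory.
void Advance(ScanState& s, uintptr_t object_size) {
  if (s.cur == s.top && s.cur != s.limit) {
    s.cur = s.limit;  // safe: a filler follows limit on paged-space pages
  } else {
    s.cur += object_size;  // normal bump to the next object header
  }
}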
@@ -89,7 +79,7 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
     if (!obj->IsFiller()) {
       if (obj->IsCode()) {
         DCHECK_EQ(space_, space_->heap()->code_space());
-        DCHECK_CODEOBJECT_SIZE(obj_size, reinterpret_cast<PagedSpace*>(space_));
+        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
       } else {
         DCHECK_OBJECT_SIZE(obj_size);
       }
@@ -29,26 +29,19 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
       cur_end_(nullptr),
       space_(space),
       page_range_(space->anchor()->next_page(), space->anchor()),
-      next_page_(page_range_.begin()) {}
-
-HeapObjectIterator::HeapObjectIterator(NewSpace* space)
-    : cur_addr_(nullptr),
-      cur_end_(nullptr),
-      space_(space),
-      page_range_(space->bottom(), space->top()),
-      next_page_(page_range_.begin()) {}
+      current_page_(page_range_.begin()) {}
 
 HeapObjectIterator::HeapObjectIterator(Page* page)
     : cur_addr_(nullptr),
       cur_end_(nullptr),
-      space_(reinterpret_cast<SpaceWithInlineAllocationArea*>(page->owner())),
+      space_(reinterpret_cast<PagedSpace*>(page->owner())),
       page_range_(page),
-      next_page_(page_range_.begin()) {
+      current_page_(page_range_.begin()) {
 #ifdef DEBUG
   Space* owner = page->owner();
-  Heap* heap = page->heap();
-  DCHECK(owner == heap->old_space() || owner == heap->map_space() ||
-         owner == heap->code_space() || owner == heap->new_space());
+  DCHECK(owner == page->heap()->old_space() ||
+         owner == page->heap()->map_space() ||
+         owner == page->heap()->code_space());
 #endif  // DEBUG
 }
@@ -56,8 +49,8 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
 // objects. This happens at the end of the page.
 bool HeapObjectIterator::AdvanceToNextPage() {
   DCHECK_EQ(cur_addr_, cur_end_);
-  if (next_page_ == page_range_.end()) return false;
-  Page* cur_page = *(next_page_++);
+  if (current_page_ == page_range_.end()) return false;
+  Page* cur_page = *(current_page_++);
   space_->heap()
       ->mark_compact_collector()
       ->sweeper()
@@ -1187,9 +1180,7 @@ void Space::AllocationStep(Address soon_object, int size) {
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
-    : SpaceWithInlineAllocationArea(heap, space, executable),
-      anchor_(this),
-      free_list_(this) {
+    : Space(heap, space, executable), anchor_(this), free_list_(this) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
@@ -2104,7 +2095,7 @@ void SemiSpace::set_age_mark(Address mark) {
   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
-  for (Page* p : PageRange(space_start(), mark)) {
+  for (Page* p : NewSpacePageRange(space_start(), mark)) {
     p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
   }
 }
@@ -817,6 +817,7 @@ class LargePage : public MemoryChunk {
   friend class MemoryAllocator;
 };
+
 // ----------------------------------------------------------------------------
 // Space is the abstract superclass for all allocation spaces.
 class Space : public Malloced {
@@ -922,81 +923,6 @@ class Space : public Malloced {
   DISALLOW_COPY_AND_ASSIGN(Space);
 };
 
-// An abstraction of allocation and relocation pointers in a page-structured
-// space.
-class AllocationInfo {
- public:
-  AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
-  AllocationInfo(Address top, Address limit)
-      : original_top_(top), top_(top), limit_(limit) {}
-
-  void Reset(Address top, Address limit) {
-    original_top_ = top;
-    set_top(top);
-    set_limit(limit);
-  }
-
-  Address original_top() {
-    SLOW_DCHECK(top_ == NULL ||
-                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
-    return original_top_;
-  }
-
-  INLINE(void set_top(Address top)) {
-    SLOW_DCHECK(top == NULL ||
-                (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
-    top_ = top;
-  }
-
-  INLINE(Address top()) const {
-    SLOW_DCHECK(top_ == NULL ||
-                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
-    return top_;
-  }
-
-  Address* top_address() { return &top_; }
-
-  INLINE(void set_limit(Address limit)) { limit_ = limit; }
-
-  INLINE(Address limit()) const { return limit_; }
-
-  Address* limit_address() { return &limit_; }
-
-#ifdef DEBUG
-  bool VerifyPagedAllocation() {
-    return (Page::FromAllocationAreaAddress(top_) ==
-            Page::FromAllocationAreaAddress(limit_)) &&
-           (top_ <= limit_);
-  }
-#endif
-
- private:
-  // The original top address when the allocation info was initialized.
-  Address original_top_;
-  // Current allocation top.
-  Address top_;
-  // Current allocation limit.
-  Address limit_;
-};
-
-class SpaceWithInlineAllocationArea : public Space {
- public:
-  SpaceWithInlineAllocationArea(Heap* heap, AllocationSpace id,
-                                Executability executable)
-      : Space(heap, id, executable), allocation_info_(nullptr, nullptr) {}
-
-  Address top() { return allocation_info_.top(); }
-  Address limit() { return allocation_info_.limit(); }
-
-  Address* allocation_top_address() { return allocation_info_.top_address(); }
-  Address* allocation_limit_address() {
-    return allocation_info_.limit_address();
-  }
-
- protected:
-  AllocationInfo allocation_info_;
-};
-
 class MemoryChunkValidator {
   // Computed offsets should match the compiler generated ones.
@@ -1490,11 +1416,8 @@ typedef PageIteratorImpl<LargePage> LargePageIterator;
 class PageRange {
  public:
   typedef PageIterator iterator;
-
-  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
   PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
-  inline PageRange(Address start, Address limit);
+  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
 
   iterator begin() { return iterator(begin_); }
   iterator end() { return iterator(end_); }
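
PageRange is nothing more than a half-open [begin, end) pair of pages exposed through begin()/end() iterators, which is what lets the call sites earlier in this diff use range-based for loops over Page*. A standalone model of that iterator protocol, using hypothetical ListPage/ListPageRange names:

// Hypothetical minimal model of PageRange's iterator protocol.
struct ListPage {
  int id;
  ListPage* next;
};

class ListPageIterator {
 public:
  explicit ListPageIterator(ListPage* p) : p_(p) {}
  ListPage* operator*() const { return p_; }
  bool operator!=(const ListPageIterator& rhs) const { return p_ != rhs.p_; }
  ListPageIterator& operator++() { p_ = p_->next; return *this; }

 private:
  ListPage* p_;
};

// Half-open range [begin, end); `end` is one past the last page to visit.
class ListPageRange {
 public:
  ListPageRange(ListPage* begin, ListPage* end) : begin_(begin), end_(end) {}
  ListPageIterator begin() const { return ListPageIterator(begin_); }
  ListPageIterator end() const { return ListPageIterator(end_); }

 private:
  ListPage* begin_;
  ListPage* end_;
};

// Usage mirrors the diff, e.g. on a nullptr-terminated list:
// for (ListPage* p : ListPageRange(first, nullptr)) { /* visit p */ }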
@@ -1514,8 +1437,8 @@ class PageRange {
 // iterator in order to be sure to visit these new objects.
 class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
  public:
+  // Creates a new object iterator in a given space.
   explicit HeapObjectIterator(PagedSpace* space);
-  explicit HeapObjectIterator(NewSpace* space);
   explicit HeapObjectIterator(Page* page);
 
 // Advance to the next object, skipping free spaces and other fillers and
@@ -1524,7 +1447,6 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
   inline HeapObject* Next() override;
 
  private:
-  inline Page* current_page() { return (*next_page_)->prev_page(); }
-
   // Fast (inlined) path of next().
   inline HeapObject* FromCurrentPage();
@@ -1534,11 +1456,78 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
   Address cur_addr_;  // Current iteration point.
   Address cur_end_;   // End iteration point.
-  SpaceWithInlineAllocationArea* space_;
+  PagedSpace* space_;
   PageRange page_range_;
-  PageRange::iterator next_page_;
+  PageRange::iterator current_page_;
 };
 
+// -----------------------------------------------------------------------------
+// A space has a circular list of pages. The next page can be accessed via
+// Page::next_page() call.
+
+// An abstraction of allocation and relocation pointers in a page-structured
+// space.
+class AllocationInfo {
+ public:
+  AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
+  AllocationInfo(Address top, Address limit)
+      : original_top_(top), top_(top), limit_(limit) {}
+
+  void Reset(Address top, Address limit) {
+    original_top_ = top;
+    set_top(top);
+    set_limit(limit);
+  }
+
+  Address original_top() {
+    SLOW_DCHECK(top_ == NULL ||
+                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
+    return original_top_;
+  }
+
+  INLINE(void set_top(Address top)) {
+    SLOW_DCHECK(top == NULL ||
+                (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
+    top_ = top;
+  }
+
+  INLINE(Address top()) const {
+    SLOW_DCHECK(top_ == NULL ||
+                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
+    return top_;
+  }
+
+  Address* top_address() { return &top_; }
+
+  INLINE(void set_limit(Address limit)) {
+    limit_ = limit;
+  }
+
+  INLINE(Address limit()) const {
+    return limit_;
+  }
+
+  Address* limit_address() { return &limit_; }
+
+#ifdef DEBUG
+  bool VerifyPagedAllocation() {
+    return (Page::FromAllocationAreaAddress(top_) ==
+            Page::FromAllocationAreaAddress(limit_)) &&
+           (top_ <= limit_);
+  }
+#endif
+
+ private:
+  // The original top address when the allocation info was initialized.
+  Address original_top_;
+  // Current allocation top.
+  Address top_;
+  // Current allocation limit.
+  Address limit_;
+};
+
 // An abstraction of the accounting statistics of a page-structured space.
 //
 // The stats are only set by functions that ensure they stay balanced. These
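
The AllocationInfo class restored here is the classic bump-pointer pair: top is the next free address, limit ends the current linear allocation area, and the fast allocation path is a single bounds check plus a pointer bump. A self-contained sketch under those assumptions (hypothetical BumpAllocator; V8 additionally aligns and tags addresses and falls back to a slow path that refills the area):

#include <cstddef>
#include <cstdint>

// Hypothetical bump-pointer allocator over a fixed region, modeling the
// top/limit pair kept in AllocationInfo.
class BumpAllocator {
 public:
  BumpAllocator(uintptr_t top, uintptr_t limit) : top_(top), limit_(limit) {}

  // Fast path: bump `top_` if the linear area still has room, else fail
  // (the real slow path would acquire a fresh linear allocation area).
  void* Allocate(size_t size) {
    if (top_ + size > limit_) return nullptr;
    uintptr_t result = top_;
    top_ += size;  // bump the allocation pointer
    return reinterpret_cast<void*>(result);
  }

  uintptr_t top() const { return top_; }
  uintptr_t limit() const { return limit_; }

 private:
  uintptr_t top_;    // next free address
  uintptr_t limit_;  // end of the current linear allocation area
};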
@@ -1888,7 +1877,18 @@ class LocalAllocationBuffer {
   AllocationInfo allocation_info_;
 };
 
-class PagedSpace : public SpaceWithInlineAllocationArea {
+class NewSpacePageRange {
+ public:
+  typedef PageRange::iterator iterator;
+  inline NewSpacePageRange(Address start, Address limit);
+
+  iterator begin() { return range_.begin(); }
+  iterator end() { return range_.end(); }
+
+ private:
+  PageRange range_;
+};
+
+class PagedSpace : public Space {
  public:
   typedef PageIterator iterator;
@@ -1966,6 +1966,18 @@ class PagedSpace : public SpaceWithInlineAllocationArea {
   // due to being too small to use for allocation.
   virtual size_t Waste() { return free_list_.wasted_bytes(); }
 
+  // Returns the allocation pointer in this space.
+  Address top() { return allocation_info_.top(); }
+  Address limit() { return allocation_info_.limit(); }
+
+  // The allocation top address.
+  Address* allocation_top_address() { return allocation_info_.top_address(); }
+
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
+
   enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
 
   // Allocate the requested number of bytes in the space if possible, return a
@@ -2138,6 +2150,9 @@ class PagedSpace : public SpaceWithInlineAllocationArea {
   // The space's free list.
   FreeList free_list_;
 
+  // Normal allocation information.
+  AllocationInfo allocation_info_;
+
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
@@ -2352,12 +2367,12 @@ class SemiSpaceIterator : public ObjectIterator {
 // The new space consists of a contiguous pair of semispaces. It simply
 // forwards most functions to the appropriate semispace.
-class NewSpace : public SpaceWithInlineAllocationArea {
+class NewSpace : public Space {
  public:
   typedef PageIterator iterator;
 
   explicit NewSpace(Heap* heap)
-      : SpaceWithInlineAllocationArea(heap, NEW_SPACE, NOT_EXECUTABLE),
+      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
@@ -2493,6 +2508,18 @@ class NewSpace : public SpaceWithInlineAllocationArea {
     return to_space_.minimum_capacity();
   }
 
+  // Return the address of the allocation pointer in the active semispace.
+  Address top() {
+    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+    return allocation_info_.top();
+  }
+
+  // Return the address of the allocation pointer limit in the active semispace.
+  Address limit() {
+    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
+    return allocation_info_.limit();
+  }
+
   // Return the address of the first object in the active semispace.
   Address bottom() { return to_space_.space_start(); }
@@ -2501,6 +2528,14 @@ class NewSpace : public SpaceWithInlineAllocationArea {
   // Set the age mark in the active semispace.
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
+  // The allocation top and limit address.
+  Address* allocation_top_address() { return allocation_info_.top_address(); }
+
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
+
   MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
       int size_in_bytes, AllocationAlignment alignment));
@@ -2617,6 +2652,10 @@ class NewSpace : public SpaceWithInlineAllocationArea {
   SemiSpace from_space_;
   base::VirtualMemory reservation_;
 
+  // Allocation pointer and limit for normal allocation and allocation during
+  // mark-compact collection.
+  AllocationInfo allocation_info_;
+
   Address top_on_previous_step_;
   HistogramInfo* allocated_histogram_;